Diffstat (limited to 'ansible_collections/community/general/plugins')
-rw-r--r--ansible_collections/community/general/plugins/action/iptables_state.py2
-rw-r--r--ansible_collections/community/general/plugins/action/shutdown.py88
-rw-r--r--ansible_collections/community/general/plugins/become/machinectl.py3
-rw-r--r--ansible_collections/community/general/plugins/become/pfexec.py2
-rw-r--r--ansible_collections/community/general/plugins/cache/redis.py8
-rw-r--r--ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py4
-rw-r--r--ansible_collections/community/general/plugins/callback/default_without_diff.py46
-rw-r--r--ansible_collections/community/general/plugins/callback/diy.py52
-rw-r--r--ansible_collections/community/general/plugins/callback/elastic.py38
-rw-r--r--ansible_collections/community/general/plugins/callback/logentries.py18
-rw-r--r--ansible_collections/community/general/plugins/callback/mail.py12
-rw-r--r--ansible_collections/community/general/plugins/callback/nrdp.py2
-rw-r--r--ansible_collections/community/general/plugins/callback/null.py4
-rw-r--r--ansible_collections/community/general/plugins/callback/opentelemetry.py65
-rw-r--r--ansible_collections/community/general/plugins/callback/say.py2
-rw-r--r--ansible_collections/community/general/plugins/callback/selective.py21
-rw-r--r--ansible_collections/community/general/plugins/callback/slack.py1
-rw-r--r--ansible_collections/community/general/plugins/callback/splunk.py4
-rw-r--r--ansible_collections/community/general/plugins/callback/sumologic.py6
-rw-r--r--ansible_collections/community/general/plugins/callback/syslog_json.py1
-rw-r--r--ansible_collections/community/general/plugins/callback/unixy.py33
-rw-r--r--ansible_collections/community/general/plugins/connection/chroot.py45
-rw-r--r--ansible_collections/community/general/plugins/connection/funcd.py2
-rw-r--r--ansible_collections/community/general/plugins/connection/incus.py169
-rw-r--r--ansible_collections/community/general/plugins/connection/lxc.py12
-rw-r--r--ansible_collections/community/general/plugins/connection/lxd.py42
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/alicloud.py58
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/auth_basic.py8
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/bitbucket.py10
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/consul.py60
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py16
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py6
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/emc.py5
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/gitlab.py5
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/hwc.py23
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py3
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/influxdb.py17
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/ipa.py45
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/keycloak.py5
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/ldap.py30
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/lxca_common.py7
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/manageiq.py12
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/nomad.py8
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/onepassword.py79
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/oneview.py16
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/online.py10
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/opennebula.py10
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/openswitch.py26
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/oracle.py45
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py4
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py2
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py2
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py8
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/pritunl.py2
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/proxmox.py8
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/purestorage.py14
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/rackspace.py52
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/redis.py4
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/scaleway.py8
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/utm.py11
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/vexata.py9
-rw-r--r--ansible_collections/community/general/plugins/doc_fragments/xenserver.py10
-rw-r--r--ansible_collections/community/general/plugins/filter/from_csv.py10
-rw-r--r--ansible_collections/community/general/plugins/filter/from_ini.py99
-rw-r--r--ansible_collections/community/general/plugins/filter/jc.py6
-rw-r--r--ansible_collections/community/general/plugins/filter/lists.py210
-rw-r--r--ansible_collections/community/general/plugins/filter/lists_difference.yml48
-rw-r--r--ansible_collections/community/general/plugins/filter/lists_intersect.yml48
-rw-r--r--ansible_collections/community/general/plugins/filter/lists_mergeby.py4
-rw-r--r--ansible_collections/community/general/plugins/filter/lists_symmetric_difference.yml48
-rw-r--r--ansible_collections/community/general/plugins/filter/lists_union.yml48
-rw-r--r--ansible_collections/community/general/plugins/filter/to_days.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_hours.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_ini.py105
-rw-r--r--ansible_collections/community/general/plugins/filter/to_milliseconds.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_minutes.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_months.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_seconds.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_time_unit.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_weeks.yml10
-rw-r--r--ansible_collections/community/general/plugins/filter/to_years.yml10
-rw-r--r--ansible_collections/community/general/plugins/inventory/cobbler.py134
-rw-r--r--ansible_collections/community/general/plugins/inventory/gitlab_runners.py8
-rw-r--r--ansible_collections/community/general/plugins/inventory/icinga2.py46
-rw-r--r--ansible_collections/community/general/plugins/inventory/linode.py45
-rw-r--r--ansible_collections/community/general/plugins/inventory/lxd.py81
-rw-r--r--ansible_collections/community/general/plugins/inventory/nmap.py52
-rw-r--r--ansible_collections/community/general/plugins/inventory/online.py17
-rw-r--r--ansible_collections/community/general/plugins/inventory/opennebula.py18
-rw-r--r--ansible_collections/community/general/plugins/inventory/proxmox.py70
-rw-r--r--ansible_collections/community/general/plugins/inventory/scaleway.py7
-rw-r--r--ansible_collections/community/general/plugins/inventory/stackpath_compute.py3
-rw-r--r--ansible_collections/community/general/plugins/inventory/virtualbox.py13
-rw-r--r--ansible_collections/community/general/plugins/inventory/xen_orchestra.py11
-rw-r--r--ansible_collections/community/general/plugins/lookup/bitwarden.py121
-rw-r--r--ansible_collections/community/general/plugins/lookup/bitwarden_secrets_manager.py125
-rw-r--r--ansible_collections/community/general/plugins/lookup/collection_version.py29
-rw-r--r--ansible_collections/community/general/plugins/lookup/consul_kv.py13
-rw-r--r--ansible_collections/community/general/plugins/lookup/dependent.py2
-rw-r--r--ansible_collections/community/general/plugins/lookup/dig.py28
-rw-r--r--ansible_collections/community/general/plugins/lookup/dnstxt.py4
-rw-r--r--ansible_collections/community/general/plugins/lookup/dsv.py10
-rw-r--r--ansible_collections/community/general/plugins/lookup/etcd.py10
-rw-r--r--ansible_collections/community/general/plugins/lookup/etcd3.py22
-rw-r--r--ansible_collections/community/general/plugins/lookup/filetree.py2
-rw-r--r--ansible_collections/community/general/plugins/lookup/flattened.py2
-rw-r--r--ansible_collections/community/general/plugins/lookup/github_app_access_token.py156
-rw-r--r--ansible_collections/community/general/plugins/lookup/lmdb_kv.py2
-rw-r--r--ansible_collections/community/general/plugins/lookup/merge_variables.py40
-rw-r--r--ansible_collections/community/general/plugins/lookup/onepassword.py214
-rw-r--r--ansible_collections/community/general/plugins/lookup/onepassword_doc.py104
-rw-r--r--ansible_collections/community/general/plugins/lookup/onepassword_raw.py61
-rw-r--r--ansible_collections/community/general/plugins/lookup/passwordstore.py66
-rw-r--r--ansible_collections/community/general/plugins/lookup/random_string.py39
-rw-r--r--ansible_collections/community/general/plugins/lookup/revbitspss.py4
-rw-r--r--ansible_collections/community/general/plugins/lookup/tss.py195
-rw-r--r--ansible_collections/community/general/plugins/module_utils/cmd_runner.py23
-rw-r--r--ansible_collections/community/general/plugins/module_utils/consul.py321
-rw-r--r--ansible_collections/community/general/plugins/module_utils/dimensiondata.py2
-rw-r--r--ansible_collections/community/general/plugins/module_utils/gio_mime.py32
-rw-r--r--ansible_collections/community/general/plugins/module_utils/gitlab.py71
-rw-r--r--ansible_collections/community/general/plugins/module_utils/hwc_utils.py6
-rw-r--r--ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py934
-rw-r--r--ansible_collections/community/general/plugins/module_utils/ldap.py18
-rw-r--r--ansible_collections/community/general/plugins/module_utils/locale_gen.py31
-rw-r--r--ansible_collections/community/general/plugins/module_utils/lxd.py10
-rw-r--r--ansible_collections/community/general/plugins/module_utils/memset.py5
-rw-r--r--ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py205
-rw-r--r--ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py14
-rw-r--r--ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py18
-rw-r--r--ansible_collections/community/general/plugins/module_utils/mh/module_helper.py18
-rw-r--r--ansible_collections/community/general/plugins/module_utils/module_helper.py20
-rw-r--r--ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py14
-rw-r--r--ansible_collections/community/general/plugins/module_utils/ocapi_utils.py2
-rw-r--r--ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py39
-rw-r--r--ansible_collections/community/general/plugins/module_utils/pipx.py2
-rw-r--r--ansible_collections/community/general/plugins/module_utils/proxmox.py83
-rw-r--r--ansible_collections/community/general/plugins/module_utils/redfish_utils.py625
-rw-r--r--ansible_collections/community/general/plugins/module_utils/redhat.py52
-rw-r--r--ansible_collections/community/general/plugins/module_utils/rundeck.py4
-rw-r--r--ansible_collections/community/general/plugins/module_utils/scaleway.py2
-rw-r--r--ansible_collections/community/general/plugins/module_utils/snap.py48
-rw-r--r--ansible_collections/community/general/plugins/module_utils/vardict.py197
-rw-r--r--ansible_collections/community/general/plugins/module_utils/version.py11
-rw-r--r--ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/airbrake_deployment.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/aix_devices.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/aix_filesystem.py70
-rw-r--r--ansible_collections/community/general/plugins/modules/aix_inittab.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/aix_lvg.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/aix_lvol.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/alerta_customer.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/ali_instance.py30
-rw-r--r--ansible_collections/community/general/plugins/modules/ali_instance_info.py9
-rw-r--r--ansible_collections/community/general/plugins/modules/alternatives.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py139
-rw-r--r--ansible_collections/community/general/plugins/modules/apache2_module.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/apk.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/apt_repo.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/apt_rpm.py76
-rw-r--r--ansible_collections/community/general/plugins/modules/archive.py25
-rw-r--r--ansible_collections/community/general/plugins/modules/atomic_container.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/atomic_host.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/atomic_image.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/awall.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/bearychat.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/bigpanda.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/bitbucket_access_key.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/btrfs_subvolume.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/bundler.py26
-rw-r--r--ansible_collections/community/general/plugins/modules/bzr.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/capabilities.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/cargo.py40
-rw-r--r--ansible_collections/community/general/plugins/modules/catapult.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/circonus_annotation.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/cisco_webex.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/clc_firewall_policy.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/clc_server.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/cloudflare_dns.py114
-rw-r--r--ansible_collections/community/general/plugins/modules/cobbler_sync.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/cobbler_system.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/composer.py19
-rw-r--r--ansible_collections/community/general/plugins/modules/consul.py103
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_acl.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_acl_bootstrap.py108
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_auth_method.py207
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_binding_rule.py183
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_kv.py28
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_policy.py164
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_role.py281
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_session.py186
-rw-r--r--ansible_collections/community/general/plugins/modules/consul_token.py331
-rw-r--r--ansible_collections/community/general/plugins/modules/copr.py26
-rw-r--r--ansible_collections/community/general/plugins/modules/cpanm.py51
-rw-r--r--ansible_collections/community/general/plugins/modules/cronvar.py13
-rw-r--r--ansible_collections/community/general/plugins/modules/crypttab.py20
-rw-r--r--ansible_collections/community/general/plugins/modules/datadog_downtime.py9
-rw-r--r--ansible_collections/community/general/plugins/modules/datadog_event.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/datadog_monitor.py50
-rw-r--r--ansible_collections/community/general/plugins/modules/dconf.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/deploy_helper.py47
-rw-r--r--ansible_collections/community/general/plugins/modules/dimensiondata_network.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/discord.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/django_manage.py70
-rw-r--r--ansible_collections/community/general/plugins/modules/dnf_config_manager.py225
-rw-r--r--ansible_collections/community/general/plugins/modules/dnf_versionlock.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/dnsimple.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/dnsimple_info.py36
-rw-r--r--ansible_collections/community/general/plugins/modules/dnsmadeeasy.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/dpkg_divert.py26
-rw-r--r--ansible_collections/community/general/plugins/modules/easy_install.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/ejabberd_user.py41
-rw-r--r--ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/etcd3.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/facter.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/facter_facts.py90
-rw-r--r--ansible_collections/community/general/plugins/modules/filesize.py38
-rw-r--r--ansible_collections/community/general/plugins/modules/filesystem.py132
-rw-r--r--ansible_collections/community/general/plugins/modules/flatpak.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/flatpak_remote.py22
-rw-r--r--ansible_collections/community/general/plugins/modules/flowdock.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/gandi_livedns.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/gconftool2.py52
-rw-r--r--ansible_collections/community/general/plugins/modules/gem.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/gio_mime.py108
-rw-r--r--ansible_collections/community/general/plugins/modules/git_config.py172
-rw-r--r--ansible_collections/community/general/plugins/modules/git_config_info.py187
-rw-r--r--ansible_collections/community/general/plugins/modules/github_deploy_key.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/github_key.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/github_release.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/github_repo.py28
-rw-r--r--ansible_collections/community/general/plugins/modules/github_webhook.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/github_webhook_info.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_branch.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_group.py35
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_group_access_token.py320
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_group_members.py57
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_group_variable.py130
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_hook.py27
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_instance_variable.py360
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_issue.py408
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_label.py500
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_merge_request.py416
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_milestone.py496
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_project.py91
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_project_access_token.py318
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_project_badge.py17
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_project_members.py36
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_project_variable.py149
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_runner.py137
-rw-r--r--ansible_collections/community/general/plugins/modules/gitlab_user.py56
-rw-r--r--ansible_collections/community/general/plugins/modules/grove.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/hana_query.py219
-rw-r--r--ansible_collections/community/general/plugins/modules/haproxy.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/heroku_collaborator.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/hg.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/hipchat.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/homebrew.py31
-rw-r--r--ansible_collections/community/general/plugins/modules/homebrew_tap.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/homectl.py30
-rw-r--r--ansible_collections/community/general/plugins/modules/honeybadger_deployment.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/hpilo_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/htpasswd.py115
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_smn_topic.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_vpc_route.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/icinga2_feature.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/icinga2_host.py20
-rw-r--r--ansible_collections/community/general/plugins/modules/idrac_redfish_config.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/idrac_redfish_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/ilo_redfish_command.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/imc_rest.py26
-rw-r--r--ansible_collections/community/general/plugins/modules/imgadm.py15
-rw-r--r--ansible_collections/community/general/plugins/modules/influxdb_database.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/influxdb_query.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py9
-rw-r--r--ansible_collections/community/general/plugins/modules/influxdb_user.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/influxdb_write.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/ini_file.py172
-rw-r--r--ansible_collections/community/general/plugins/modules/installp.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/interfaces_file.py54
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_config.py42
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py31
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_dnszone.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_group.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_hbacrule.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_host.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_hostgroup.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_otptoken.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py89
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_sudorule.py58
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_user.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/ipa_vault.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/ipbase_info.py304
-rw-r--r--ansible_collections/community/general/plugins/modules/ipify_facts.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/ipmi_boot.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/ipmi_power.py17
-rw-r--r--ansible_collections/community/general/plugins/modules/iptables_state.py68
-rw-r--r--ansible_collections/community/general/plugins/modules/ipwcli_dns.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/irc.py54
-rw-r--r--ansible_collections/community/general/plugins/modules/iso_create.py21
-rw-r--r--ansible_collections/community/general/plugins/modules/iso_customize.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/iso_extract.py11
-rw-r--r--ansible_collections/community/general/plugins/modules/java_cert.py94
-rw-r--r--ansible_collections/community/general/plugins/modules/java_keystore.py32
-rw-r--r--ansible_collections/community/general/plugins/modules/jboss.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/jenkins_build.py31
-rw-r--r--ansible_collections/community/general/plugins/modules/jenkins_build_info.py210
-rw-r--r--ansible_collections/community/general/plugins/modules/jenkins_job.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/jenkins_job_info.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/jenkins_plugin.py51
-rw-r--r--ansible_collections/community/general/plugins/modules/jenkins_script.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/jira.py30
-rw-r--r--ansible_collections/community/general/plugins/modules/kdeconfig.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/kernel_blacklist.py15
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_authentication.py149
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_authentication_required_actions.py457
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_authz_custom_policy.py211
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_authz_permission.py433
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_authz_permission_info.py173
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_client.py117
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py61
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_clientscope.py52
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py67
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_component_info.py169
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_group.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py36
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_realm.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_realm_key.py475
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_realm_rolemapping.py391
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_role.py81
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_user.py542
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_user_federation.py69
-rw-r--r--ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/keyring.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/kibana_plugin.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/launchd.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/layman.py15
-rw-r--r--ansible_collections/community/general/plugins/modules/ldap_attrs.py67
-rw-r--r--ansible_collections/community/general/plugins/modules/ldap_entry.py17
-rw-r--r--ansible_collections/community/general/plugins/modules/ldap_passwd.py11
-rw-r--r--ansible_collections/community/general/plugins/modules/ldap_search.py117
-rw-r--r--ansible_collections/community/general/plugins/modules/linode.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/linode_v4.py5
-rw-r--r--ansible_collections/community/general/plugins/modules/listen_ports_facts.py13
-rw-r--r--ansible_collections/community/general/plugins/modules/locale_gen.py334
-rw-r--r--ansible_collections/community/general/plugins/modules/lvg.py367
-rw-r--r--ansible_collections/community/general/plugins/modules/lvg_rename.py170
-rw-r--r--ansible_collections/community/general/plugins/modules/lvol.py43
-rw-r--r--ansible_collections/community/general/plugins/modules/lxc_container.py24
-rw-r--r--ansible_collections/community/general/plugins/modules/lxd_container.py78
-rw-r--r--ansible_collections/community/general/plugins/modules/lxd_profile.py15
-rw-r--r--ansible_collections/community/general/plugins/modules/lxd_project.py17
-rw-r--r--ansible_collections/community/general/plugins/modules/macports.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/mail.py36
-rw-r--r--ansible_collections/community/general/plugins/modules/make.py54
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_alerts.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_group.py22
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_policies.py53
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_policies_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_provider.py101
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_tags.py45
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_tags_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_tenant.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/manageiq_user.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/mas.py20
-rw-r--r--ansible_collections/community/general/plugins/modules/mattermost.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/maven_artifact.py34
-rw-r--r--ansible_collections/community/general/plugins/modules/memset_dns_reload.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/memset_memstore_info.py5
-rw-r--r--ansible_collections/community/general/plugins/modules/memset_server_info.py5
-rw-r--r--ansible_collections/community/general/plugins/modules/memset_zone.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/memset_zone_domain.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/memset_zone_record.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/modprobe.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/monit.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/mqtt.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/mssql_db.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/mssql_script.py95
-rw-r--r--ansible_collections/community/general/plugins/modules/nagios.py33
-rw-r--r--ansible_collections/community/general/plugins/modules/netcup_dns.py17
-rw-r--r--ansible_collections/community/general/plugins/modules/newrelic_deployment.py46
-rw-r--r--ansible_collections/community/general/plugins/modules/nexmo.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/nictagadm.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/nmcli.py462
-rw-r--r--ansible_collections/community/general/plugins/modules/nomad_job.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/nomad_job_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/nomad_token.py301
-rw-r--r--ansible_collections/community/general/plugins/modules/nosh.py20
-rw-r--r--ansible_collections/community/general/plugins/modules/npm.py95
-rw-r--r--ansible_collections/community/general/plugins/modules/nsupdate.py17
-rw-r--r--ansible_collections/community/general/plugins/modules/ocapi_command.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/ocapi_info.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/oci_vcn.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/odbc.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/one_host.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/one_image.py20
-rw-r--r--ansible_collections/community/general/plugins/modules/one_image_info.py13
-rw-r--r--ansible_collections/community/general/plugins/modules/one_service.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/one_template.py24
-rw-r--r--ansible_collections/community/general/plugins/modules/one_vm.py96
-rw-r--r--ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py5
-rw-r--r--ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/oneandone_private_network.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/oneandone_public_ip.py1
-rw-r--r--ansible_collections/community/general/plugins/modules/oneandone_server.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/onepassword_info.py9
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_fc_network.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_network_set.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_network_set_info.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_san_manager.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/open_iscsi.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/openbsd_pkg.py20
-rw-r--r--ansible_collections/community/general/plugins/modules/openwrt_init.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/opkg.py21
-rw-r--r--ansible_collections/community/general/plugins/modules/osx_defaults.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/ovh_ip_failover.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/pacemaker_cluster.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/packet_device.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/packet_ip_subnet.py9
-rw-r--r--ansible_collections/community/general/plugins/modules/packet_project.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/packet_sshkey.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/packet_volume.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/packet_volume_attachment.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/pacman.py86
-rw-r--r--ansible_collections/community/general/plugins/modules/pacman_key.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/pagerduty.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/pagerduty_alert.py286
-rw-r--r--ansible_collections/community/general/plugins/modules/pagerduty_change.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/pagerduty_user.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/pam_limits.py39
-rw-r--r--ansible_collections/community/general/plugins/modules/pamd.py24
-rw-r--r--ansible_collections/community/general/plugins/modules/parted.py44
-rw-r--r--ansible_collections/community/general/plugins/modules/pear.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/pids.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/pip_package_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/pipx.py26
-rw-r--r--ansible_collections/community/general/plugins/modules/pipx_info.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/pkg5.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/pkgin.py11
-rw-r--r--ansible_collections/community/general/plugins/modules/pkgng.py23
-rw-r--r--ansible_collections/community/general/plugins/modules/pkgutil.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/pmem.py28
-rw-r--r--ansible_collections/community/general/plugins/modules/pnpm.py462
-rw-r--r--ansible_collections/community/general/plugins/modules/portage.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/pritunl_org.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/pritunl_user.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/pritunl_user_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/profitbricks.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/profitbricks_nic.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/profitbricks_volume.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox.py412
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_disk.py188
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_kvm.py497
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_nic.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_node_info.py140
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_pool.py180
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_pool_member.py238
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_snap.py47
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py144
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_storage_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_template.py98
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_user_info.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/proxmox_vm_info.py267
-rw-r--r--ansible_collections/community/general/plugins/modules/pubnub_blocks.py21
-rw-r--r--ansible_collections/community/general/plugins/modules/pulp_repo.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/puppet.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/pushbullet.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/python_requirements_info.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/rax.py30
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cbs.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cdb.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cdb_database.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cdb_user.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_clb.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_clb_nodes.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_clb_ssl.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_dns.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_dns_record.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_facts.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_files.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_files_objects.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_identity.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_keypair.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_meta.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_alarm.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_check.py66
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_entity.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_notification.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_scaling_group.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_scaling_policy.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/read_csv.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/redfish_command.py113
-rw-r--r--ansible_collections/community/general/plugins/modules/redfish_config.py124
-rw-r--r--ansible_collections/community/general/plugins/modules/redfish_info.py51
-rw-r--r--ansible_collections/community/general/plugins/modules/redhat_subscription.py180
-rw-r--r--ansible_collections/community/general/plugins/modules/redis.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/redis_data.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/redis_data_incr.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/redis_info.py48
-rw-r--r--ansible_collections/community/general/plugins/modules/rhevm.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/rhn_channel.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/rhn_register.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/rhsm_release.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/rhsm_repository.py184
-rw-r--r--ansible_collections/community/general/plugins/modules/riak.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/rocketchat.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/rollbar_deployment.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/rundeck_job_run.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/runit.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/sap_task_list_execute.py348
-rw-r--r--ansible_collections/community/general/plugins/modules/sapcar_extract.py228
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_compute.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container_registry.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_database_backup.py28
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_function.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_ip.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_private_network.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_security_group.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py23
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_sshkey.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_volume.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/sefcontext.py36
-rw-r--r--ansible_collections/community/general/plugins/modules/selinux_permissive.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/sendgrid.py5
-rw-r--r--ansible_collections/community/general/plugins/modules/sensu_check.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/sensu_client.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/serverless.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/shutdown.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/simpleinit_msb.py322
-rw-r--r--ansible_collections/community/general/plugins/modules/sl_vm.py3
-rw-r--r--ansible_collections/community/general/plugins/modules/slack.py41
-rw-r--r--ansible_collections/community/general/plugins/modules/slackpkg.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/smartos_image_info.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/snap.py254
-rw-r--r--ansible_collections/community/general/plugins/modules/snap_alias.py33
-rw-r--r--ansible_collections/community/general/plugins/modules/snmp_facts.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/solaris_zone.py24
-rw-r--r--ansible_collections/community/general/plugins/modules/sorcery.py255
-rw-r--r--ansible_collections/community/general/plugins/modules/spectrum_device.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py21
-rw-r--r--ansible_collections/community/general/plugins/modules/ssh_config.py82
-rw-r--r--ansible_collections/community/general/plugins/modules/stackdriver.py5
-rw-r--r--ansible_collections/community/general/plugins/modules/stacki_host.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/statsd.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/statusio_maintenance.py13
-rw-r--r--ansible_collections/community/general/plugins/modules/sudoers.py36
-rw-r--r--ansible_collections/community/general/plugins/modules/supervisorctl.py33
-rw-r--r--ansible_collections/community/general/plugins/modules/svc.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/svr4pkg.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/swdepot.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/swupd.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/sysrc.py24
-rw-r--r--ansible_collections/community/general/plugins/modules/sysupgrade.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/telegram.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/terraform.py103
-rw-r--r--ansible_collections/community/general/plugins/modules/timezone.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_dns_record.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_dns_zone.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_share.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_user.py36
-rw-r--r--ansible_collections/community/general/plugins/modules/ufw.py44
-rw-r--r--ansible_collections/community/general/plugins/modules/urpmi.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/usb_facts.py113
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_location.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/vdo.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_info.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_role.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_schema.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_user.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/vmadm.py28
-rw-r--r--ansible_collections/community/general/plugins/modules/wdc_redfish_command.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/wdc_redfish_info.py4
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_app.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_db.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_domain.py16
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_mailbox.py12
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_site.py14
-rw-r--r--ansible_collections/community/general/plugins/modules/xattr.py17
-rw-r--r--ansible_collections/community/general/plugins/modules/xbps.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/xcc_redfish_command.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_facts.py2
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_guest.py63
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_guest_info.py10
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py15
-rw-r--r--ansible_collections/community/general/plugins/modules/xfconf.py44
-rw-r--r--ansible_collections/community/general/plugins/modules/xfconf_info.py6
-rw-r--r--ansible_collections/community/general/plugins/modules/xml.py54
-rw-r--r--ansible_collections/community/general/plugins/modules/yum_versionlock.py30
-rw-r--r--ansible_collections/community/general/plugins/modules/zfs.py8
-rw-r--r--ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py18
-rw-r--r--ansible_collections/community/general/plugins/modules/znode.py7
-rw-r--r--ansible_collections/community/general/plugins/modules/zypper.py26
-rw-r--r--ansible_collections/community/general/plugins/modules/zypper_repository.py5
-rw-r--r--ansible_collections/community/general/plugins/test/fqdn_valid.py103
636 files changed, 23392 insertions, 7095 deletions
diff --git a/ansible_collections/community/general/plugins/action/iptables_state.py b/ansible_collections/community/general/plugins/action/iptables_state.py
index f59a7298b..4a27ef8a0 100644
--- a/ansible_collections/community/general/plugins/action/iptables_state.py
+++ b/ansible_collections/community/general/plugins/action/iptables_state.py
@@ -44,7 +44,7 @@ class ActionModule(ActionBase):
 
     def _async_result(self, async_status_args, task_vars, timeout):
         '''
-        Retrieve results of the asynchonous task, and display them in place of
+        Retrieve results of the asynchronous task, and display them in place of
         the async wrapper results (those with the ansible_job_id key).
         '''
         async_status = self._task.copy()
diff --git a/ansible_collections/community/general/plugins/action/shutdown.py b/ansible_collections/community/general/plugins/action/shutdown.py
index c2860f1d6..01201a640 100644
--- a/ansible_collections/community/general/plugins/action/shutdown.py
+++ b/ansible_collections/community/general/plugins/action/shutdown.py
@@ -6,6 +6,7 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
 from __future__ import (absolute_import, division, print_function)
+
 __metaclass__ = type
 
 from ansible.errors import AnsibleError, AnsibleConnectionFailure
@@ -44,7 +45,7 @@ class ActionModule(ActionBase):
     SHUTDOWN_COMMAND_ARGS = {
         'alpine': '',
         'void': '-h +{delay_min} "{message}"',
-        'freebsd': '-h +{delay_sec}s "{message}"',
+        'freebsd': '-p +{delay_sec}s "{message}"',
         'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
         'macosx': '-h +{delay_min} "{message}"',
         'openbsd': '-h +{delay_min} "{message}"',
@@ -80,13 +81,6 @@ class ActionModule(ActionBase):
                     getattr(self, default_value))))
         return value
 
-    def get_shutdown_command_args(self, distribution):
-        args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
-        # Convert seconds to minutes. If less that 60, set it to 0.
-        delay_sec = self.delay
-        shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
-        return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
-
     def get_distribution(self, task_vars):
         # FIXME: only execute the module if we don't already have the facts we need
         distribution = {}
@@ -101,7 +95,8 @@ class ActionModule(ActionBase):
                     to_native(module_output['module_stdout']).strip(),
                     to_native(module_output['module_stderr']).strip()))
             distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
-            distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+            distribution['version'] = to_text(
+                module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
             distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
             display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
             return distribution
@@ -109,6 +104,23 @@ class ActionModule(ActionBase):
             raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
 
     def get_shutdown_command(self, task_vars, distribution):
+        def find_command(command, find_search_paths):
+            display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+                action=self._task.action,
+                command=command,
+                paths=find_search_paths))
+            find_result = self._execute_module(
+                task_vars=task_vars,
+                # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+                module_name='ansible.legacy.find',
+                module_args={
+                    'paths': find_search_paths,
+                    'patterns': [command],
+                    'file_type': 'any'
+                }
+            )
+            return [x['path'] for x in find_result['files']]
+
         shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
         default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
         search_paths = self._task.args.get('search_paths', default_search_paths)
@@ -127,45 +139,53 @@ class ActionModule(ActionBase):
except TypeError:
raise AnsibleError(err_msg.format(search_paths))
- display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
- action=self._task.action,
- command=shutdown_bin,
- paths=search_paths))
- find_result = self._execute_module(
- task_vars=task_vars,
- # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
- module_name='ansible.legacy.find',
- module_args={
- 'paths': search_paths,
- 'patterns': [shutdown_bin],
- 'file_type': 'any'
- }
- )
-
- full_path = [x['path'] for x in find_result['files']]
- if not full_path:
- raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
- self._shutdown_command = full_path[0]
- return self._shutdown_command
+ full_path = find_command(shutdown_bin, search_paths) # find the path to the shutdown command
+ if not full_path: # if we could not find the shutdown command
+ display.vvv('Unable to find command "{0}" in search paths: {1}, will attempt a shutdown using systemd '
+ 'directly.'.format(shutdown_bin, search_paths)) # tell the user we will try with systemd
+ systemctl_search_paths = ['/bin', '/usr/bin']
+ full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command
+ if not full_path: # if we couldn't find systemctl
+ raise AnsibleError(
+ 'Could not find command "{0}" in search paths: {1} or systemctl command in search paths: {2}, unable to shutdown.'.
+ format(shutdown_bin, search_paths, systemctl_search_paths)) # we give up here
+ else:
+ return "{0} poweroff".format(full_path[0]) # done, since we cannot use args with systemd shutdown
+
+ # systemd case taken care of, here we add args to the command
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+ # Convert seconds to minutes. If less than 60, set it to 0.
+ delay_sec = self.delay
+ shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
+ return '{0} {1}'. \
+ format(
+ full_path[0],
+ args.format(
+ delay_sec=delay_sec,
+ delay_min=delay_sec // 60,
+ message=shutdown_message
+ )
+ )
def perform_shutdown(self, task_vars, distribution):
result = {}
shutdown_result = {}
- shutdown_command = self.get_shutdown_command(task_vars, distribution)
- shutdown_command_args = self.get_shutdown_command_args(distribution)
- shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+ shutdown_command_exec = self.get_shutdown_command(task_vars, distribution)
self.cleanup(force=True)
try:
display.vvv("{action}: shutting down server...".format(action=self._task.action))
- display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
+ display.debug("{action}: shutting down server with command '{command}'".
+ format(action=self._task.action, command=shutdown_command_exec))
if self._play_context.check_mode:
shutdown_result['rc'] = 0
else:
shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
except AnsibleConnectionFailure as e:
# If the connection is closed too quickly due to the system being shutdown, carry on
- display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ display.debug(
+ '{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action,
+ error=to_text(e)))
shutdown_result['rc'] = 0
if shutdown_result['rc'] != 0:
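A minimal standalone sketch of the fallback logic the refactored get_shutdown_command follows: try the platform shutdown binary first, then fall back to a bare "systemctl poweroff". Here find_in_paths is a hypothetical stand-in for the ansible.legacy.find module call, and the Linux-style argument template is an assumption rather than the plugin's actual default.

    import os

    def find_in_paths(command, search_paths):
        # Hypothetical stand-in for the find module: return every matching path.
        return [os.path.join(path, command)
                for path in search_paths
                if os.path.isfile(os.path.join(path, command))]

    def build_shutdown_command(delay_sec=0, message="Shut down initiated by Ansible"):
        # Prefer the platform shutdown binary in the usual sbin locations.
        full_path = find_in_paths("shutdown", ["/sbin", "/usr/sbin", "/usr/local/sbin"])
        if not full_path:
            # Fall back to systemctl; it takes no delay or message arguments,
            # so the command is returned without any formatting.
            full_path = find_in_paths("systemctl", ["/bin", "/usr/bin"])
            if not full_path:
                raise RuntimeError("neither shutdown nor systemctl could be found")
            return "{0} poweroff".format(full_path[0])
        # Assumed Linux-style arguments: delay in whole minutes plus a wall message.
        args = '-h +{delay_min} "{message}"'.format(delay_min=delay_sec // 60, message=message)
        return "{0} {1}".format(full_path[0], args)

    if __name__ == "__main__":
        print(build_shutdown_command(delay_sec=120, message="maintenance reboot"))
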
diff --git a/ansible_collections/community/general/plugins/become/machinectl.py b/ansible_collections/community/general/plugins/become/machinectl.py
index 461a3f635..9b9ac7ec5 100644
--- a/ansible_collections/community/general/plugins/become/machinectl.py
+++ b/ansible_collections/community/general/plugins/become/machinectl.py
@@ -68,7 +68,7 @@ DOCUMENTATION = '''
- section: machinectl_become_plugin
key: password
notes:
- - When not using this plugin with user C(root), it only works correctly with a polkit rule which will alter
+ - When not using this plugin with user V(root), it only works correctly with a polkit rule which will alter
the behaviour of machinectl. This rule must alter the prompt behaviour to ask directly for the user credentials,
if the user is allowed to perform the action (take a look at the examples section).
If such a rule is not present the plugin only work if it is used in context with the root user,
@@ -102,6 +102,7 @@ class BecomeModule(BecomeBase):
prompt = 'Password: '
fail = ('==== AUTHENTICATION FAILED ====',)
success = ('==== AUTHENTICATION COMPLETE ====',)
+ require_tty = True # see https://github.com/ansible-collections/community.general/issues/6932
@staticmethod
def remove_ansi_codes(line):
diff --git a/ansible_collections/community/general/plugins/become/pfexec.py b/ansible_collections/community/general/plugins/become/pfexec.py
index 392ee961f..2468a28a9 100644
--- a/ansible_collections/community/general/plugins/become/pfexec.py
+++ b/ansible_collections/community/general/plugins/become/pfexec.py
@@ -82,7 +82,7 @@ DOCUMENTATION = '''
env:
- name: ANSIBLE_PFEXEC_WRAP_EXECUTION
notes:
- - This plugin ignores I(become_user) as pfexec uses it's own C(exec_attr) to figure this out.
+ - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out.
'''
from ansible.plugins.become import BecomeBase
diff --git a/ansible_collections/community/general/plugins/cache/redis.py b/ansible_collections/community/general/plugins/cache/redis.py
index 8c0621717..c43b1dbb5 100644
--- a/ansible_collections/community/general/plugins/cache/redis.py
+++ b/ansible_collections/community/general/plugins/cache/redis.py
@@ -18,9 +18,9 @@ DOCUMENTATION = '''
_uri:
description:
- A colon separated string of connection information for Redis.
- - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
- - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
- - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
+ - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme).
+ - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme).
+ - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
required: true
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
@@ -150,7 +150,7 @@ class CacheModule(BaseCacheModule):
# format: "localhost:26379;localhost2:26379;0:changeme"
connections = uri.split(';')
connection_args = connections.pop(-1)
- if len(connection_args) > 0: # hanle if no db nr is given
+ if len(connection_args) > 0: # handle if no db nr is given
connection_args = connection_args.split(':')
kw['db'] = connection_args.pop(0)
try:
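The sentinel handling above splits the connection string on ';' and treats the trailing entry as "db:password". A rough standalone illustration of both connection-string shapes, leaving out the tls:// prefix handling; this is not the plugin's actual parser.

    def parse_redis_uri(uri):
        # Sentinel form: "host1:port1;host2:port2;db:password".
        if ';' in uri:
            connections = uri.split(';')
            connection_args = connections.pop(-1).split(':')
            return {
                'sentinels': [tuple(c.split(':')) for c in connections],
                'db': connection_args[0],
                'password': connection_args[1] if len(connection_args) > 1 else None,
            }
        # Plain form: "host:port:db:password" (db and password are optional).
        host, port, *rest = uri.split(':')
        return {
            'host': host,
            'port': int(port),
            'db': rest[0] if rest else '0',
            'password': rest[1] if len(rest) > 1 else None,
        }

    print(parse_redis_uri("localhost:6379:0:changeme"))
    print(parse_redis_uri("localhost:26379;localhost2:26379;0:changeme"))
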
diff --git a/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py b/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
index ccdbcc9cf..d3961bf0c 100644
--- a/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
+++ b/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
@@ -24,7 +24,7 @@ DOCUMENTATION = '''
options:
max_mem_file:
required: true
- description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes).
+ description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes).
env:
- name: CGROUP_MAX_MEM_FILE
ini:
@@ -32,7 +32,7 @@ DOCUMENTATION = '''
key: max_mem_file
cur_mem_file:
required: true
- description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes).
+ description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes).
env:
- name: CGROUP_CUR_MEM_FILE
ini:
diff --git a/ansible_collections/community/general/plugins/callback/default_without_diff.py b/ansible_collections/community/general/plugins/callback/default_without_diff.py
new file mode 100644
index 000000000..c138cd445
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/default_without_diff.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: default_without_diff
+ type: stdout
+ short_description: The default ansible callback without diff output
+ version_added: 8.4.0
+ description:
+ - This is basically the default ansible callback plugin (P(ansible.builtin.default#callback)) without
+ showing diff output. This can be useful when using another callback which sends more detailed information
+ to another service, like the L(ARA, https://ara.recordsansible.org/) callback, and you want diff output
+ sent to that plugin but not shown on the console output.
+ author: Felix Fontein (@felixfontein)
+ extends_documentation_fragment:
+ - ansible.builtin.default_callback
+ - ansible.builtin.result_format_callback
+'''
+
+EXAMPLES = r'''
+# Enable callback in ansible.cfg:
+ansible_config: |
+ [defaults]
+ stdout_callback = community.general.default_without_diff
+
+# Enable callback with environment variables:
+environment_variable: |
+ ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff
+'''
+
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+class CallbackModule(Default):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.default_without_diff'
+
+ def v2_on_file_diff(self, result):
+ pass
diff --git a/ansible_collections/community/general/plugins/callback/diy.py b/ansible_collections/community/general/plugins/callback/diy.py
index 75b3f4e24..cf9369e4b 100644
--- a/ansible_collections/community/general/plugins/callback/diy.py
+++ b/ansible_collections/community/general/plugins/callback/diy.py
@@ -18,7 +18,7 @@ DOCUMENTATION = r'''
extends_documentation_fragment:
- default_callback
notes:
- - Uses the C(default) callback plugin output when a custom callback message(C(msg)) is not provided.
+ - Uses the P(ansible.builtin.default#callback) callback plugin output when a custom callback V(message(msg\)) is not provided.
- Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options.
The dictionary is only available in the templating context for the options. It is not a variable that is available via the other
various execution contexts, such as playbook, play, task etc.
@@ -40,8 +40,8 @@ DOCUMENTATION = r'''
if value C(is not None and not omit and length is greater than 0),
then the option is being used with output.
**Effect**: render value as template and output"
- - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan),
- C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)"
+ - "Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green), V(red), V(bright cyan),
+ V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta), V(bright magenta), V(normal)"
seealso:
- name: default – default Ansible screen output
description: The official documentation on the B(default) callback plugin.
@@ -62,7 +62,7 @@ DOCUMENTATION = r'''
on_any_msg_color:
description:
- - Output color to be used for I(on_any_msg).
+ - Output color to be used for O(on_any_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -86,7 +86,7 @@ DOCUMENTATION = r'''
runner_on_failed_msg_color:
description:
- - Output color to be used for I(runner_on_failed_msg).
+ - Output color to be used for O(runner_on_failed_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -110,7 +110,7 @@ DOCUMENTATION = r'''
runner_on_ok_msg_color:
description:
- - Output color to be used for I(runner_on_ok_msg).
+ - Output color to be used for O(runner_on_ok_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -134,7 +134,7 @@ DOCUMENTATION = r'''
runner_on_skipped_msg_color:
description:
- - Output color to be used for I(runner_on_skipped_msg).
+ - Output color to be used for O(runner_on_skipped_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -158,7 +158,7 @@ DOCUMENTATION = r'''
runner_on_unreachable_msg_color:
description:
- - Output color to be used for I(runner_on_unreachable_msg).
+ - Output color to be used for O(runner_on_unreachable_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -182,7 +182,7 @@ DOCUMENTATION = r'''
playbook_on_start_msg_color:
description:
- - Output color to be used for I(playbook_on_start_msg).
+ - Output color to be used for O(playbook_on_start_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -206,7 +206,7 @@ DOCUMENTATION = r'''
playbook_on_notify_msg_color:
description:
- - Output color to be used for I(playbook_on_notify_msg).
+ - Output color to be used for O(playbook_on_notify_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -230,7 +230,7 @@ DOCUMENTATION = r'''
playbook_on_no_hosts_matched_msg_color:
description:
- - Output color to be used for I(playbook_on_no_hosts_matched_msg).
+ - Output color to be used for O(playbook_on_no_hosts_matched_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -254,7 +254,7 @@ DOCUMENTATION = r'''
playbook_on_no_hosts_remaining_msg_color:
description:
- - Output color to be used for I(playbook_on_no_hosts_remaining_msg).
+ - Output color to be used for O(playbook_on_no_hosts_remaining_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -278,7 +278,7 @@ DOCUMENTATION = r'''
playbook_on_task_start_msg_color:
description:
- - Output color to be used for I(playbook_on_task_start_msg).
+ - Output color to be used for O(playbook_on_task_start_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -302,7 +302,7 @@ DOCUMENTATION = r'''
playbook_on_handler_task_start_msg_color:
description:
- - Output color to be used for I(playbook_on_handler_task_start_msg).
+ - Output color to be used for O(playbook_on_handler_task_start_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -326,7 +326,7 @@ DOCUMENTATION = r'''
playbook_on_vars_prompt_msg_color:
description:
- - Output color to be used for I(playbook_on_vars_prompt_msg).
+ - Output color to be used for O(playbook_on_vars_prompt_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -350,7 +350,7 @@ DOCUMENTATION = r'''
playbook_on_play_start_msg_color:
description:
- - Output color to be used for I(playbook_on_play_start_msg).
+ - Output color to be used for O(playbook_on_play_start_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -374,7 +374,7 @@ DOCUMENTATION = r'''
playbook_on_stats_msg_color:
description:
- - Output color to be used for I(playbook_on_stats_msg).
+ - Output color to be used for O(playbook_on_stats_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -398,7 +398,7 @@ DOCUMENTATION = r'''
on_file_diff_msg_color:
description:
- - Output color to be used for I(on_file_diff_msg).
+ - Output color to be used for O(on_file_diff_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -422,7 +422,7 @@ DOCUMENTATION = r'''
playbook_on_include_msg_color:
description:
- - Output color to be used for I(playbook_on_include_msg).
+ - Output color to be used for O(playbook_on_include_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -446,7 +446,7 @@ DOCUMENTATION = r'''
runner_item_on_ok_msg_color:
description:
- - Output color to be used for I(runner_item_on_ok_msg).
+ - Output color to be used for O(runner_item_on_ok_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -470,7 +470,7 @@ DOCUMENTATION = r'''
runner_item_on_failed_msg_color:
description:
- - Output color to be used for I(runner_item_on_failed_msg).
+ - Output color to be used for O(runner_item_on_failed_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -494,7 +494,7 @@ DOCUMENTATION = r'''
runner_item_on_skipped_msg_color:
description:
- - Output color to be used for I(runner_item_on_skipped_msg).
+ - Output color to be used for O(runner_item_on_skipped_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -518,7 +518,7 @@ DOCUMENTATION = r'''
runner_retry_msg_color:
description:
- - Output color to be used for I(runner_retry_msg).
+ - Output color to be used for O(runner_retry_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -542,7 +542,7 @@ DOCUMENTATION = r'''
runner_on_start_msg_color:
description:
- - Output color to be used for I(runner_on_start_msg).
+ - Output color to be used for O(runner_on_start_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -566,7 +566,7 @@ DOCUMENTATION = r'''
runner_on_no_hosts_msg_color:
description:
- - Output color to be used for I(runner_on_no_hosts_msg).
+ - Output color to be used for O(runner_on_no_hosts_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
@@ -590,7 +590,7 @@ DOCUMENTATION = r'''
playbook_on_setup_msg_color:
description:
- - Output color to be used for I(playbook_on_setup_msg).
+ - Output color to be used for O(playbook_on_setup_msg).
- Template should render a L(valid color value,#notes).
ini:
- section: callback_diy
diff --git a/ansible_collections/community/general/plugins/callback/elastic.py b/ansible_collections/community/general/plugins/callback/elastic.py
index 37526c155..0c94d1ba3 100644
--- a/ansible_collections/community/general/plugins/callback/elastic.py
+++ b/ansible_collections/community/general/plugins/callback/elastic.py
@@ -84,6 +84,7 @@ import time
import uuid
from collections import OrderedDict
+from contextlib import closing
from os.path import basename
from ansible.errors import AnsibleError, AnsibleRuntimeError
@@ -201,24 +202,25 @@ class ElasticSource(object):
apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
if apm_cli:
- instrument() # Only call this once, as early as possible.
- if traceparent:
- parent = trace_parent_from_string(traceparent)
- apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
- else:
- apm_cli.begin_transaction("Session", start=parent_start_time)
- # Populate trace metadata attributes
- if self.ansible_version is not None:
- label(ansible_version=self.ansible_version)
- label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
- if self.ip_address is not None:
- label(ansible_host_ip=self.ip_address)
-
- for task_data in tasks:
- for host_uuid, host_data in task_data.host_data.items():
- self.create_span_data(apm_cli, task_data, host_data)
-
- apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
+ with closing(apm_cli):
+ instrument() # Only call this once, as early as possible.
+ if traceparent:
+ parent = trace_parent_from_string(traceparent)
+ apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
+ else:
+ apm_cli.begin_transaction("Session", start=parent_start_time)
+ # Populate trace metadata attributes
+ if self.ansible_version is not None:
+ label(ansible_version=self.ansible_version)
+ label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
+ if self.ip_address is not None:
+ label(ansible_host_ip=self.ip_address)
+
+ for task_data in tasks:
+ for host_uuid, host_data in task_data.host_data.items():
+ self.create_span_data(apm_cli, task_data, host_data)
+
+ apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
def create_span_data(self, apm_cli, task_data, host_data):
""" create the span with the given TaskData and HostData """
diff --git a/ansible_collections/community/general/plugins/callback/logentries.py b/ansible_collections/community/general/plugins/callback/logentries.py
index 22322a4df..d3feceb72 100644
--- a/ansible_collections/community/general/plugins/callback/logentries.py
+++ b/ansible_collections/community/general/plugins/callback/logentries.py
@@ -18,7 +18,7 @@ DOCUMENTATION = '''
requirements:
- whitelisting in configuration
- certifi (Python library)
- - flatdict (Python library), if you want to use the 'flatten' option
+ - flatdict (Python library), if you want to use the O(flatten) option
options:
api:
description: URI to the Logentries API.
@@ -90,9 +90,9 @@ examples: >
api = data.logentries.com
port = 10000
tls_port = 20000
- use_tls = no
+ use_tls = true
token = dd21fc88-f00a-43ff-b977-e3a4233c53af
- flatten = False
+ flatten = false
'''
import os
@@ -196,15 +196,11 @@ else:
class TLSSocketAppender(PlainTextSocketAppender):
def open_connection(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = ssl.wrap_socket(
+ context = ssl.create_default_context(
+ purpose=ssl.Purpose.SERVER_AUTH,
+ cafile=certifi.where(), )
+ sock = context.wrap_socket(
sock=sock,
- keyfile=None,
- certfile=None,
- server_side=False,
- cert_reqs=ssl.CERT_REQUIRED,
- ssl_version=getattr(
- ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
- ca_certs=certifi.where(),
do_handshake_on_connect=True,
suppress_ragged_eofs=True, )
sock.connect((self.LE_API, self.LE_TLS_PORT))
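The replacement above moves from the removed ssl.wrap_socket to an SSLContext, which verifies certificates and negotiates a modern protocol by default. A self-contained sketch of that pattern against a generic host and port; passing cafile is optional (certifi as in the plugin, or omit it to use the system trust store), and server_hostname is included here because hostname checking is on by default.

    import socket
    import ssl

    def open_tls_connection(host, port, cafile=None):
        # create_default_context enables certificate and hostname verification.
        context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=cafile)
        raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        tls_sock = context.wrap_socket(raw_sock,
                                       server_hostname=host,
                                       do_handshake_on_connect=True,
                                       suppress_ragged_eofs=True)
        tls_sock.connect((host, port))  # the handshake runs as part of connect()
        return tls_sock

    # Example (needs network access and a reachable TLS endpoint):
    # sock = open_tls_connection("data.logentries.com", 20000)
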
diff --git a/ansible_collections/community/general/plugins/callback/mail.py b/ansible_collections/community/general/plugins/callback/mail.py
index 9e8314baf..1b847ea34 100644
--- a/ansible_collections/community/general/plugins/callback/mail.py
+++ b/ansible_collections/community/general/plugins/callback/mail.py
@@ -71,6 +71,16 @@ options:
ini:
- section: callback_mail
key: bcc
+ message_id_domain:
+ description:
+ - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID).
+ - The default is the hostname of the control node.
+ type: str
+ ini:
+ - section: callback_mail
+ key: message_id_domain
+ version_added: 8.2.0
+
'''
import json
@@ -131,7 +141,7 @@ class CallbackModule(CallbackBase):
content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses])
if self.cc:
content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses])
- content += 'Message-ID: %s\n' % email.utils.make_msgid()
+ content += 'Message-ID: %s\n' % email.utils.make_msgid(domain=self.get_option('message_id_domain'))
content += 'Subject: %s\n\n' % subject.strip()
content += body
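The new message_id_domain option is passed straight through to email.utils.make_msgid; when it is unset, the standard library falls back to the local hostname, which matches the documented default. A two-line illustration:

    import email.utils

    print(email.utils.make_msgid())                      # ...@<local hostname>
    print(email.utils.make_msgid(domain="example.com"))  # ...@example.com
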
diff --git a/ansible_collections/community/general/plugins/callback/nrdp.py b/ansible_collections/community/general/plugins/callback/nrdp.py
index c16a3c7be..62f4a89ec 100644
--- a/ansible_collections/community/general/plugins/callback/nrdp.py
+++ b/ansible_collections/community/general/plugins/callback/nrdp.py
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
short_description: Post task results to a Nagios server through nrdp
description:
- This callback send playbook result to Nagios.
- - Nagios shall use NRDP to recive passive events.
+ - Nagios shall use NRDP to receive passive events.
- The passive check is sent to a dedicated host/service for Ansible.
options:
url:
diff --git a/ansible_collections/community/general/plugins/callback/null.py b/ansible_collections/community/general/plugins/callback/null.py
index f53a24294..6aeeba313 100644
--- a/ansible_collections/community/general/plugins/callback/null.py
+++ b/ansible_collections/community/general/plugins/callback/null.py
@@ -15,7 +15,7 @@ DOCUMENTATION = '''
- set as main display callback
short_description: Don't display stuff to screen
description:
- - This callback prevents outputing events to screen.
+ - This callback prevents outputting events to screen.
'''
from ansible.plugins.callback import CallbackBase
@@ -24,7 +24,7 @@ from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
'''
- This callback wont print messages to stdout when new callback events are received.
+ This callback won't print messages to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
diff --git a/ansible_collections/community/general/plugins/callback/opentelemetry.py b/ansible_collections/community/general/plugins/callback/opentelemetry.py
index e00e1d71a..492e42071 100644
--- a/ansible_collections/community/general/plugins/callback/opentelemetry.py
+++ b/ansible_collections/community/general/plugins/callback/opentelemetry.py
@@ -32,10 +32,10 @@ DOCUMENTATION = '''
enable_from_environment:
type: str
description:
- - Whether to enable this callback only if the given environment variable exists and it is set to C(true).
+ - Whether to enable this callback only if the given environment variable exists and it is set to V(true).
- This is handy when you use Configuration as Code and want to send distributed traces
if running in the CI rather when running Ansible locally.
- - For such, it evaluates the given I(enable_from_environment) value as environment variable
+ - For such, it evaluates the given O(enable_from_environment) value as environment variable
and if set to true this plugin will be enabled.
env:
- name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
@@ -73,6 +73,17 @@ DOCUMENTATION = '''
- section: callback_opentelemetry
key: disable_logs
version_added: 5.8.0
+ disable_attributes_in_logs:
+ default: false
+ type: bool
+ description:
+ - Disable populating span attributes to the logs.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_DISABLE_ATTRIBUTES_IN_LOGS
+ ini:
+ - section: callback_opentelemetry
+ key: disable_attributes_in_logs
+ version_added: 7.1.0
requirements:
- opentelemetry-api (Python library)
- opentelemetry-exporter-otlp (Python library)
@@ -244,7 +255,7 @@ class OpenTelemetrySource(object):
task.dump = dump
task.add_host(HostData(host_uuid, host_name, status, result))
- def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent, disable_logs):
+ def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent, disable_logs, disable_attributes_in_logs):
""" generate distributed traces from the collected TaskData and HostData """
tasks = []
@@ -280,9 +291,9 @@ class OpenTelemetrySource(object):
for task in tasks:
for host_uuid, host_data in task.host_data.items():
with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
- self.update_span_data(task, host_data, span, disable_logs)
+ self.update_span_data(task, host_data, span, disable_logs, disable_attributes_in_logs)
- def update_span_data(self, task_data, host_data, span, disable_logs):
+ def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs):
""" update the span with the given TaskData and HostData """
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
@@ -315,39 +326,47 @@ class OpenTelemetrySource(object):
status = Status(status_code=StatusCode.UNSET)
span.set_status(status)
+
+ # Create the span and log attributes
+ attributes = {
+ "ansible.task.module": task_data.action,
+ "ansible.task.message": message,
+ "ansible.task.name": name,
+ "ansible.task.result": rc,
+ "ansible.task.host.name": host_data.name,
+ "ansible.task.host.status": host_data.status
+ }
if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action:
names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys())
values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values())
- self.set_span_attribute(span, ("ansible.task.args.name"), names)
- self.set_span_attribute(span, ("ansible.task.args.value"), values)
- self.set_span_attribute(span, "ansible.task.module", task_data.action)
- self.set_span_attribute(span, "ansible.task.message", message)
- self.set_span_attribute(span, "ansible.task.name", name)
- self.set_span_attribute(span, "ansible.task.result", rc)
- self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
- self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
+ attributes[("ansible.task.args.name")] = names
+ attributes[("ansible.task.args.value")] = values
+
+ self.set_span_attributes(span, attributes)
+
# This will allow to enrich the service map
self.add_attributes_for_service_map_if_possible(span, task_data)
# Send logs
if not disable_logs:
- span.add_event(task_data.dump)
- span.end(end_time=host_data.finish)
+ # This will avoid populating span attributes to the logs
+ span.add_event(task_data.dump, attributes={} if disable_attributes_in_logs else attributes)
+ span.end(end_time=host_data.finish)
- def set_span_attribute(self, span, attributeName, attributeValue):
- """ update the span attribute with the given attribute and value if not None """
+ def set_span_attributes(self, span, attributes):
+ """ update the span attributes with the given attributes if not None """
if span is None and self._display is not None:
self._display.warning('span object is None. Please double check if that is expected.')
else:
- if attributeValue is not None:
- span.set_attribute(attributeName, attributeValue)
+ if attributes is not None:
+ span.set_attributes(attributes)
def add_attributes_for_service_map_if_possible(self, span, task_data):
"""Update the span attributes with the service that the task interacted with, if possible."""
redacted_url = self.parse_and_redact_url_if_possible(task_data.args)
if redacted_url:
- self.set_span_attribute(span, "http.url", redacted_url.geturl())
+ span.set_attribute("http.url", redacted_url.geturl())
@staticmethod
def parse_and_redact_url_if_possible(args):
@@ -434,6 +453,7 @@ class CallbackModule(CallbackBase):
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
self.hide_task_arguments = None
+ self.disable_attributes_in_logs = None
self.disable_logs = None
self.otel_service_name = None
self.ansible_playbook = None
@@ -465,6 +485,8 @@ class CallbackModule(CallbackBase):
self.hide_task_arguments = self.get_option('hide_task_arguments')
+ self.disable_attributes_in_logs = self.get_option('disable_attributes_in_logs')
+
self.disable_logs = self.get_option('disable_logs')
self.otel_service_name = self.get_option('otel_service_name')
@@ -562,7 +584,8 @@ class CallbackModule(CallbackBase):
self.tasks_data,
status,
self.traceparent,
- self.disable_logs
+ self.disable_logs,
+ self.disable_attributes_in_logs
)
def v2_runner_on_async_failed(self, result, **kwargs):
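The refactor above collects the per-task attributes into one dictionary, sets them on the span in a single set_attributes() call, and optionally attaches the same dictionary to the log event. A minimal sketch against the public OpenTelemetry API (assumes the opentelemetry-api and opentelemetry-sdk packages are installed); the attribute values are made up for illustration.

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider

    trace.set_tracer_provider(TracerProvider())
    tracer = trace.get_tracer(__name__)

    attributes = {
        "ansible.task.module": "ansible.builtin.ping",
        "ansible.task.host.name": "web01",
        "ansible.task.host.status": "ok",
    }
    disable_attributes_in_logs = False

    with tracer.start_as_current_span("[web01] play: ping the host") as span:
        # One batched call instead of repeated span.set_attribute() calls.
        span.set_attributes(attributes)
        # The span event ("log") carries the attributes unless they are disabled.
        span.add_event("task result dump",
                       attributes={} if disable_attributes_in_logs else attributes)
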
diff --git a/ansible_collections/community/general/plugins/callback/say.py b/ansible_collections/community/general/plugins/callback/say.py
index 005725a22..9d96ad74d 100644
--- a/ansible_collections/community/general/plugins/callback/say.py
+++ b/ansible_collections/community/general/plugins/callback/say.py
@@ -18,8 +18,6 @@ DOCUMENTATION = '''
short_description: notify using software speech synthesizer
description:
- This plugin will use the C(say) or C(espeak) program to "speak" about play events.
- notes:
- - In Ansible 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
'''
import platform
diff --git a/ansible_collections/community/general/plugins/callback/selective.py b/ansible_collections/community/general/plugins/callback/selective.py
index 526975bd2..069675783 100644
--- a/ansible_collections/community/general/plugins/callback/selective.py
+++ b/ansible_collections/community/general/plugins/callback/selective.py
@@ -44,26 +44,17 @@ from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.module_utils.common.text.converters import to_text
-try:
- codeCodes = C.COLOR_CODES
-except AttributeError:
- # This constant was moved to ansible.constants in
- # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67
- # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions,
- # we include from the original location.
- from ansible.utils.color import codeCodes
-
DONT_COLORIZE = False
COLORS = {
'normal': '\033[0m',
- 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]),
+ 'ok': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_OK]),
'bold': '\033[1m',
'not_so_bold': '\033[1m\033[34m',
- 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]),
- 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]),
+ 'changed': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_CHANGED]),
+ 'failed': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_ERROR]),
'endc': '\033[0m',
- 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]),
+ 'skipped': '\033[{0}m'.format(C.COLOR_CODES[C.COLOR_SKIP]),
}
@@ -115,8 +106,8 @@ class CallbackModule(CallbackBase):
line_length = 120
if self.last_skipped:
print()
- msg = colorize("# {0} {1}".format(task_name,
- '*' * (line_length - len(task_name))), 'bold')
+ line = "# {0} ".format(task_name)
+ msg = colorize("{0}{1}".format(line, '*' * (line_length - len(line))), 'bold')
print(msg)
def _indent_text(self, text, indent_level):
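The one-line change above fixes the banner width: the old padding ignored the "# " prefix and trailing space, so long banners overshot the 120-column target. The arithmetic in isolation:

    line_length = 120
    task_name = "install packages"

    # Old: padding computed from the bare task name, so the prefix pushed the
    # banner past line_length.
    old = "# {0} {1}".format(task_name, '*' * (line_length - len(task_name)))

    # New: measure the full prefix first, then pad to exactly line_length.
    line = "# {0} ".format(task_name)
    new = "{0}{1}".format(line, '*' * (line_length - len(line)))

    print(len(old), len(new))  # 123 120
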
diff --git a/ansible_collections/community/general/plugins/callback/slack.py b/ansible_collections/community/general/plugins/callback/slack.py
index e9b84bbb3..e7a2743ec 100644
--- a/ansible_collections/community/general/plugins/callback/slack.py
+++ b/ansible_collections/community/general/plugins/callback/slack.py
@@ -18,7 +18,6 @@ DOCUMENTATION = '''
short_description: Sends play events to a Slack channel
description:
- This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
- - Before Ansible 2.4 only environment variables were available for configuring this plugin.
options:
webhook_url:
required: true
diff --git a/ansible_collections/community/general/plugins/callback/splunk.py b/ansible_collections/community/general/plugins/callback/splunk.py
index 67ad944d2..d15547f44 100644
--- a/ansible_collections/community/general/plugins/callback/splunk.py
+++ b/ansible_collections/community/general/plugins/callback/splunk.py
@@ -36,8 +36,8 @@ DOCUMENTATION = '''
key: authtoken
validate_certs:
description: Whether to validate certificates for connections to HEC. It is not recommended to set to
- C(false) except when you are sure that nobody can intercept the connection
- between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks!
+ V(false) except when you are sure that nobody can intercept the connection
+ between this plugin and HEC, as setting it to V(false) allows man-in-the-middle attacks!
env:
- name: SPLUNK_VALIDATE_CERTS
ini:
diff --git a/ansible_collections/community/general/plugins/callback/sumologic.py b/ansible_collections/community/general/plugins/callback/sumologic.py
index 998081c35..46ab3f0f7 100644
--- a/ansible_collections/community/general/plugins/callback/sumologic.py
+++ b/ansible_collections/community/general/plugins/callback/sumologic.py
@@ -6,7 +6,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
name: sumologic
type: notification
short_description: Sends task result events to Sumologic
@@ -15,8 +15,8 @@ description:
- This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source.
requirements:
- Whitelisting this callback plugin
- - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
- of C("timestamp": "(.*)")'
+ - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
+ of V("timestamp": "(.*\)")'
options:
url:
description: URL to the Sumologic HTTP collector source.
diff --git a/ansible_collections/community/general/plugins/callback/syslog_json.py b/ansible_collections/community/general/plugins/callback/syslog_json.py
index 0f5ec4d0d..43d6ff2f9 100644
--- a/ansible_collections/community/general/plugins/callback/syslog_json.py
+++ b/ansible_collections/community/general/plugins/callback/syslog_json.py
@@ -16,7 +16,6 @@ DOCUMENTATION = '''
short_description: sends JSON events to syslog
description:
- This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format.
- - Before Ansible 2.9 only environment variables were available for configuration.
options:
server:
description: Syslog server that will receive the event.
diff --git a/ansible_collections/community/general/plugins/callback/unixy.py b/ansible_collections/community/general/plugins/callback/unixy.py
index 02a2e46ba..4908202c2 100644
--- a/ansible_collections/community/general/plugins/callback/unixy.py
+++ b/ansible_collections/community/general/plugins/callback/unixy.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2017, Allyson Bowles <@akatch>
+# Copyright (c) 2023, Al Bowles <@akatch>
# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -11,7 +11,7 @@ __metaclass__ = type
DOCUMENTATION = '''
name: unixy
type: stdout
- author: Allyson Bowles (@akatch)
+ author: Al Bowles (@akatch)
short_description: condensed Ansible output
description:
- Consolidated Ansible output in the style of LINUX/UNIX startup logs.
@@ -40,7 +40,6 @@ class CallbackModule(CallbackModule_default):
- Only display task names if the task runs on at least one host
- Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line)
- Consolidate stats display
- - Display whether run is in --check mode
- Don't show play name if no hosts found
'''
@@ -92,19 +91,31 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_task_start(self, task, is_conditional):
self._get_task_display_name(task)
if self.task_display_name is not None:
- self._display.display("%s..." % self.task_display_name)
+ if task.check_mode and self.get_option('check_mode_markers'):
+ self._display.display("%s (check mode)..." % self.task_display_name)
+ else:
+ self._display.display("%s..." % self.task_display_name)
def v2_playbook_on_handler_task_start(self, task):
self._get_task_display_name(task)
if self.task_display_name is not None:
- self._display.display("%s (via handler)... " % self.task_display_name)
+ if task.check_mode and self.get_option('check_mode_markers'):
+ self._display.display("%s (via handler in check mode)... " % self.task_display_name)
+ else:
+ self._display.display("%s (via handler)... " % self.task_display_name)
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
- if name and play.hosts:
- msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
+ if play.check_mode and self.get_option('check_mode_markers'):
+ if name and play.hosts:
+ msg = u"\n- %s (in check mode) on hosts: %s -" % (name, ",".join(play.hosts))
+ else:
+ msg = u"- check mode -"
else:
- msg = u"---"
+ if name and play.hosts:
+ msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
+ else:
+ msg = u"---"
self._display.display(msg)
@@ -227,8 +238,10 @@ class CallbackModule(CallbackModule_default):
self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR)
def v2_playbook_on_start(self, playbook):
- # TODO display whether this run is happening in check mode
- self._display.display("Executing playbook %s" % basename(playbook._file_name))
+ if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
+ self._display.display("Executing playbook %s in check mode" % basename(playbook._file_name))
+ else:
+ self._display.display("Executing playbook %s" % basename(playbook._file_name))
# show CLI arguments
if self._display.verbosity > 3:
diff --git a/ansible_collections/community/general/plugins/connection/chroot.py b/ansible_collections/community/general/plugins/connection/chroot.py
index ef6d5566d..810316aaa 100644
--- a/ansible_collections/community/general/plugins/connection/chroot.py
+++ b/ansible_collections/community/general/plugins/connection/chroot.py
@@ -46,8 +46,42 @@ DOCUMENTATION = '''
vars:
- name: ansible_chroot_exe
default: chroot
+ disable_root_check:
+ description:
+ - Do not check that the user is not root.
+ ini:
+ - section: chroot_connection
+ key: disable_root_check
+ env:
+ - name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK
+ vars:
+ - name: ansible_chroot_disable_root_check
+ default: false
+ type: bool
+ version_added: 7.3.0
'''
+EXAMPLES = r"""
+# Plugin requires root privileges for chroot, -E preserves your env (and location of ~/.ansible):
+# sudo -E ansible-playbook ...
+#
+# Static inventory file
+# [chroots]
+# /path/to/debootstrap
+# /path/to/feboostrap
+# /path/to/lxc-image
+# /path/to/chroot
+
+# playbook
+---
+- hosts: chroots
+ connection: community.general.chroot
+ tasks:
+ - debug:
+ msg: "This is coming from chroot environment"
+
+"""
+
import os
import os.path
import subprocess
@@ -81,11 +115,7 @@ class Connection(ConnectionBase):
self.chroot = self._play_context.remote_addr
- if os.geteuid() != 0:
- raise AnsibleError("chroot connection requires running as root")
-
- # we're running as root on the local system so do some
- # trivial checks for ensuring 'host' is actually a chroot'able dir
+ # do some trivial checks for ensuring 'host' is actually a chroot'able dir
if not os.path.isdir(self.chroot):
raise AnsibleError("%s is not a directory" % self.chroot)
@@ -99,6 +129,11 @@ class Connection(ConnectionBase):
def _connect(self):
""" connect to the chroot """
+ if not self.get_option('disable_root_check') and os.geteuid() != 0:
+ raise AnsibleError(
+ "chroot connection requires running as root. "
+ "You can override this check with the `disable_root_check` option.")
+
if os.path.isabs(self.get_option('chroot_exe')):
self.chroot_cmd = self.get_option('chroot_exe')
else:
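The root check now lives in _connect() and can be bypassed with the new disable_root_check option. The condition on its own, using a plain function and PermissionError in place of the plugin class and AnsibleError:

    import os

    def check_chroot_privileges(disable_root_check=False):
        # Enforce euid 0 unless the check has been explicitly disabled.
        if not disable_root_check and os.geteuid() != 0:
            raise PermissionError(
                "chroot connection requires running as root; "
                "set disable_root_check to skip this check")

    # check_chroot_privileges()                         # raises for unprivileged users
    # check_chroot_privileges(disable_root_check=True)  # skips the euid check
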
diff --git a/ansible_collections/community/general/plugins/connection/funcd.py b/ansible_collections/community/general/plugins/connection/funcd.py
index 9f37f791d..219a8cccd 100644
--- a/ansible_collections/community/general/plugins/connection/funcd.py
+++ b/ansible_collections/community/general/plugins/connection/funcd.py
@@ -70,7 +70,7 @@ class Connection(ConnectionBase):
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # totally ignores privlege escalation
+ # totally ignores privilege escalation
display.vvv("EXEC %s" % cmd, host=self.host)
p = self.client.command.run(cmd)[self.host]
return p[0], p[1], p[2]
diff --git a/ansible_collections/community/general/plugins/connection/incus.py b/ansible_collections/community/general/plugins/connection/incus.py
new file mode 100644
index 000000000..81d6f971c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/incus.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+# Based on lxd.py (c) 2016, Matt Clay <matt@mystile.com>
+# (c) 2023, Stephane Graber <stgraber@stgraber.org>
+# Copyright (c) 2023 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author: Stéphane Graber (@stgraber)
+ name: incus
+ short_description: Run tasks in Incus instances via the Incus CLI.
+ description:
+ - Run commands or put/fetch files to an existing Incus instance using Incus CLI.
+ version_added: "8.2.0"
+ options:
+ remote_addr:
+ description:
+ - The instance identifier.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_incus_host
+ executable:
+ description:
+ - The shell to use for execution inside the instance.
+ default: /bin/sh
+ vars:
+ - name: ansible_executable
+ - name: ansible_incus_executable
+ remote:
+ description:
+ - The name of the Incus remote to use (per C(incus remote list)).
+ - Remotes are used to access multiple servers from a single client.
+ default: local
+ vars:
+ - name: ansible_incus_remote
+ project:
+ description:
+ - The name of the Incus project to use (per C(incus project list)).
+ - Projects are used to divide the instances running on a server.
+ default: default
+ vars:
+ - name: ansible_incus_project
+"""
+
+import os
+from subprocess import call, Popen, PIPE
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ """ Incus based connections """
+
+ transport = "incus"
+ has_pipelining = True
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self._incus_cmd = get_bin_path("incus")
+
+ if not self._incus_cmd:
+ raise AnsibleError("incus command not found in PATH")
+
+ def _connect(self):
+ """connect to Incus (nothing to do here) """
+ super(Connection, self)._connect()
+
+ if not self._connected:
+ self._display.vvv(u"ESTABLISH Incus CONNECTION FOR USER: root",
+ host=self._instance())
+ self._connected = True
+
+ def _instance(self):
+ # Return only the leading part of the FQDN as the instance name
+ # as Incus instance names cannot be a FQDN.
+ return self.get_option('remote_addr').split(".")[0]
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """ execute a command on the Incus host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ self._display.vvv(u"EXEC {0}".format(cmd),
+ host=self._instance())
+
+ local_cmd = [
+ self._incus_cmd,
+ "--project", self.get_option("project"),
+ "exec",
+ "%s:%s" % (self.get_option("remote"), self._instance()),
+ "--",
+ self._play_context.executable, "-c", cmd]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = process.communicate(in_data)
+
+ stdout = to_text(stdout)
+ stderr = to_text(stderr)
+
+ if stderr == "Error: Instance is not running.\n":
+ raise AnsibleConnectionFailure("instance not running: %s" %
+ self._instance())
+
+ if stderr == "Error: Instance not found\n":
+ raise AnsibleConnectionFailure("instance not found: %s" %
+ self._instance())
+
+ return process.returncode, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ """ put a file from local to Incus """
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path),
+ host=self._instance())
+
+ if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
+
+ local_cmd = [
+ self._incus_cmd,
+ "--project", self.get_option("project"),
+ "file", "push", "--quiet",
+ in_path,
+ "%s:%s/%s" % (self.get_option("remote"),
+ self._instance(),
+ out_path)]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ call(local_cmd)
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from Incus to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path),
+ host=self._instance())
+
+ local_cmd = [
+ self._incus_cmd,
+ "--project", self.get_option("project"),
+ "file", "pull", "--quiet",
+ "%s:%s/%s" % (self.get_option("remote"),
+ self._instance(),
+ in_path),
+ out_path]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ call(local_cmd)
+
+ def close(self):
+ """ close the connection (nothing to do here) """
+ super(Connection, self).close()
+
+ self._connected = False
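Stripped of the plugin machinery, exec_command above boils down to one incus CLI invocation. A standalone sketch of that call; the instance name, remote, and project values are illustrative, and shutil.which stands in for Ansible's get_bin_path.

    from shutil import which
    from subprocess import PIPE, Popen

    def incus_exec(instance, cmd, remote="local", project="default", executable="/bin/sh"):
        incus_cmd = which("incus")
        if incus_cmd is None:
            raise RuntimeError("incus command not found in PATH")
        # incus --project <project> exec <remote>:<instance> -- <shell> -c <cmd>
        local_cmd = [incus_cmd, "--project", project, "exec",
                     "%s:%s" % (remote, instance), "--", executable, "-c", cmd]
        process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()
        return process.returncode, stdout.decode(), stderr.decode()

    # rc, out, err = incus_exec("mycontainer", "uname -a")
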
diff --git a/ansible_collections/community/general/plugins/connection/lxc.py b/ansible_collections/community/general/plugins/connection/lxc.py
index adf3eec1c..7bb5824fa 100644
--- a/ansible_collections/community/general/plugins/connection/lxc.py
+++ b/ansible_collections/community/general/plugins/connection/lxc.py
@@ -19,6 +19,7 @@ DOCUMENTATION = '''
- Container identifier
default: inventory_hostname
vars:
+ - name: inventory_hostname
- name: ansible_host
- name: ansible_lxc_host
executable:
@@ -59,7 +60,7 @@ class Connection(ConnectionBase):
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
- self.container_name = self._play_context.remote_addr
+ self.container_name = None
self.container = None
def _connect(self):
@@ -67,12 +68,15 @@ class Connection(ConnectionBase):
super(Connection, self)._connect()
if not HAS_LIBLXC:
- msg = "lxc bindings for python2 are not installed"
+ msg = "lxc python bindings are not installed"
raise errors.AnsibleError(msg)
- if self.container:
+ container_name = self.get_option('remote_addr')
+ if self.container and self.container_name == container_name:
return
+ self.container_name = container_name
+
self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
self.container = _lxc.Container(self.container_name)
if self.container.state == "STOPPED":
@@ -117,7 +121,7 @@ class Connection(ConnectionBase):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
# python2-lxc needs bytes. python3-lxc needs text.
- executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
+ executable = to_native(self.get_option('executable'), errors='surrogate_or_strict')
local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
read_stdout, write_stdout = None, None
diff --git a/ansible_collections/community/general/plugins/connection/lxd.py b/ansible_collections/community/general/plugins/connection/lxd.py
index affb87dfd..0e784b85f 100644
--- a/ansible_collections/community/general/plugins/connection/lxd.py
+++ b/ansible_collections/community/general/plugins/connection/lxd.py
@@ -10,13 +10,15 @@ __metaclass__ = type
DOCUMENTATION = '''
author: Matt Clay (@mattclay) <matt@mystile.com>
name: lxd
- short_description: Run tasks in lxc containers via lxc CLI
+ short_description: Run tasks in LXD instances via C(lxc) CLI
description:
- - Run commands or put/fetch files to an existing lxc container using lxc CLI
+ - Run commands or put/fetch files to an existing instance using C(lxc) CLI.
options:
remote_addr:
description:
- - Container identifier.
+ - Instance (container/VM) identifier.
+ - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.))
+ is used as the instance identifier.
default: inventory_hostname
vars:
- name: inventory_hostname
@@ -24,7 +26,7 @@ DOCUMENTATION = '''
- name: ansible_lxd_host
executable:
description:
- - shell to use for execution inside container
+ - Shell to use for execution inside instance.
default: /bin/sh
vars:
- name: ansible_executable
@@ -69,32 +71,38 @@ class Connection(ConnectionBase):
raise AnsibleError("lxc command not found in PATH")
if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
- self._display.warning('lxd does not support remote_user, using container default: root')
+ self._display.warning('lxd does not support remote_user, using default: root')
+
+ def _host(self):
+ """ translate remote_addr to lxd (short) hostname """
+ return self.get_option("remote_addr").split(".", 1)[0]
def _connect(self):
"""connect to lxd (nothing to do here) """
super(Connection, self)._connect()
if not self._connected:
- self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self.get_option('remote_addr'))
+ self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host())
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=True):
""" execute a command on the lxd host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- self._display.vvv(u"EXEC {0}".format(cmd), host=self.get_option('remote_addr'))
+ self._display.vvv(u"EXEC {0}".format(cmd), host=self._host())
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
local_cmd.extend([
"exec",
- "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
+ "%s:%s" % (self.get_option("remote"), self._host()),
"--",
self.get_option("executable"), "-c", cmd
])
+ self._display.vvvvv(u"EXEC {0}".format(local_cmd), host=self._host())
+
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
@@ -104,11 +112,13 @@ class Connection(ConnectionBase):
stdout = to_text(stdout)
stderr = to_text(stderr)
- if stderr == "error: Container is not running.\n":
- raise AnsibleConnectionFailure("container not running: %s" % self.get_option('remote_addr'))
+ self._display.vvvvv(u"EXEC lxc output: {0} {1}".format(stdout, stderr), host=self._host())
+
+ if "is not running" in stderr:
+ raise AnsibleConnectionFailure("instance not running: %s" % self._host())
- if stderr == "error: not found\n":
- raise AnsibleConnectionFailure("container not found: %s" % self.get_option('remote_addr'))
+ if stderr.strip() == "Error: Instance not found" or stderr.strip() == "error: not found":
+ raise AnsibleConnectionFailure("instance not found: %s" % self._host())
return process.returncode, stdout, stderr
@@ -116,7 +126,7 @@ class Connection(ConnectionBase):
""" put a file from local to lxd """
super(Connection, self).put_file(in_path, out_path)
- self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
+ self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host())
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
@@ -127,7 +137,7 @@ class Connection(ConnectionBase):
local_cmd.extend([
"file", "push",
in_path,
- "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
+ "%s:%s/%s" % (self.get_option("remote"), self._host(), out_path)
])
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
@@ -139,14 +149,14 @@ class Connection(ConnectionBase):
""" fetch a file from lxd to local """
super(Connection, self).fetch_file(in_path, out_path)
- self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
+ self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host())
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
local_cmd.extend([
"file", "pull",
- "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
+ "%s:%s/%s" % (self.get_option("remote"), self._host(), in_path),
out_path
])
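The new _host() helper is what allows FQDNs as remote_addr: only the first label is kept, because LXD instance names cannot contain dots. The translation on its own:

    def lxd_instance_name(remote_addr):
        # LXD instance names cannot be FQDNs, so keep only the leading label.
        return remote_addr.split(".", 1)[0]

    print(lxd_instance_name("web01.example.com"))  # web01
    print(lxd_instance_name("web01"))              # web01
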
diff --git a/ansible_collections/community/general/plugins/doc_fragments/alicloud.py b/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
index f464e178c..b462fcacb 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
@@ -15,40 +15,40 @@ class ModuleDocFragment(object):
options:
alicloud_access_key:
description:
- - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY),
- C(ALICLOUD_ACCESS_KEY_ID) will be used instead.
+ - Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY),
+ E(ALICLOUD_ACCESS_KEY_ID) will be used instead.
aliases: ['access_key_id', 'access_key']
type: str
alicloud_secret_key:
description:
- - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY),
- C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
+ - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY),
+ E(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
aliases: ['secret_access_key', 'secret_key']
type: str
alicloud_region:
description:
- The Alibaba Cloud region to use. If not specified then the value of environment variable
- C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead.
+ E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID) will be used instead.
aliases: ['region', 'region_id']
required: true
type: str
alicloud_security_token:
description:
- The Alibaba Cloud security token. If not specified then the value of environment variable
- C(ALICLOUD_SECURITY_TOKEN) will be used instead.
+ E(ALICLOUD_SECURITY_TOKEN) will be used instead.
aliases: ['security_token']
type: str
alicloud_assume_role:
description:
- If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
- - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name),
- I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy)
+ - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name),
+ C(alicloud_assume_role_session_expiration) and C(alicloud_assume_role_policy).
type: dict
aliases: ['assume_role']
alicloud_assume_role_arn:
description:
- - The Alibaba Cloud role_arn. The ARN of the role to assume. If ARN is set to an empty string,
- it does not perform role switching. It supports environment variable ALICLOUD_ASSUME_ROLE_ARN.
+ - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string,
+ it does not perform role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN).
ansible will execute with provided credentials.
aliases: ['assume_role_arn']
type: str
@@ -56,14 +56,14 @@ options:
description:
- The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted,
'ansible' is passed to the AssumeRole call as session name. It supports environment variable
- ALICLOUD_ASSUME_ROLE_SESSION_NAME
+ E(ALICLOUD_ASSUME_ROLE_SESSION_NAME).
aliases: ['assume_role_session_name']
type: str
alicloud_assume_role_session_expiration:
description:
- - The Alibaba Cloud session_expiration. The time after which the established session for assuming
+ - The Alibaba Cloud C(session_expiration). The time after which the established session for assuming
role expires. Valid value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default
- value). It supports environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION
+ value). It supports environment variable E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
aliases: ['assume_role_session_expiration']
type: int
ecs_role_name:
@@ -79,31 +79,31 @@ options:
profile:
description:
- This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the
- ALICLOUD_PROFILE environment variable.
+ E(ALICLOUD_PROFILE) environment variable.
type: str
shared_credentials_file:
description:
- - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE
+ - This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE)
environment variable.
- - If this is not set and a profile is specified, ~/.aliyun/config.json will be used.
+ - If this is not set and a profile is specified, C(~/.aliyun/config.json) will be used.
type: str
author:
- "He Guimin (@xiaozhu36)"
requirements:
- - "python >= 3.6"
+ - "Python >= 3.6"
notes:
- If parameters are not set within the module, the following
environment variables can be used in decreasing order of precedence
- C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID),
- C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY),
- C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID),
- C(ALICLOUD_SECURITY_TOKEN),
- C(ALICLOUD_ECS_ROLE_NAME),
- C(ALICLOUD_SHARED_CREDENTIALS_FILE),
- C(ALICLOUD_PROFILE),
- C(ALICLOUD_ASSUME_ROLE_ARN),
- C(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
- C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION),
- - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can be typically be used to specify the
- ALICLOUD region, when required, but this can also be configured in the footmark config file
+ E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID),
+ E(ALICLOUD_SECRET_KEY) or E(ALICLOUD_SECRET_ACCESS_KEY),
+ E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID),
+ E(ALICLOUD_SECURITY_TOKEN),
+ E(ALICLOUD_ECS_ROLE_NAME),
+ E(ALICLOUD_SHARED_CREDENTIALS_FILE),
+ E(ALICLOUD_PROFILE),
+ E(ALICLOUD_ASSUME_ROLE_ARN),
+ E(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
+ E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
+ - E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID) can typically be used to specify the
+ Alicloud region, when required, but this can also be configured in the footmark config file.
'''
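
The fragment now marks the fallback variables as environment variables (E(...)) and documents their precedence. A hedged sketch of passing the credentials explicitly instead of relying on those variables (community.general.ali_instance_info is assumed here as one consumer of the fragment; key names and region are illustrative):

- name: Gather Alibaba Cloud ECS instance information
  community.general.ali_instance_info:
    alicloud_access_key: "{{ alicloud_access_key }}"
    alicloud_secret_key: "{{ alicloud_secret_key }}"
    alicloud_region: cn-beijing
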
diff --git a/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py b/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
index 674fb1e9a..77d127c62 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
@@ -14,19 +14,19 @@ class ModuleDocFragment(object):
options:
api_url:
description:
- - The resolvable endpoint for the API
+ - The resolvable endpoint for the API.
type: str
api_username:
description:
- - The username to use for authentication against the API
+ - The username to use for authentication against the API.
type: str
api_password:
description:
- - The password to use for authentication against the API
+ - The password to use for authentication against the API.
type: str
validate_certs:
description:
- - Whether or not to validate SSL certs when supplying a https endpoint.
+ - Whether or not to validate SSL certs when supplying an HTTPS endpoint.
type: bool
default: true
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py b/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py
index 703bb412a..0a66ea0a6 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py
@@ -16,25 +16,25 @@ options:
client_id:
description:
- The OAuth consumer key.
- - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_ID) will be used.
type: str
client_secret:
description:
- The OAuth consumer secret.
- - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) will be used.
type: str
user:
description:
- The username.
- - If not set the environment variable C(BITBUCKET_USERNAME) will be used.
- - I(username) is an alias of I(user) since community.genreal 6.0.0. It was an alias of I(workspace) before.
+ - If not set the environment variable E(BITBUCKET_USERNAME) will be used.
+ - O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before.
type: str
version_added: 4.0.0
aliases: [ username ]
password:
description:
- The App password.
- - If not set the environment variable C(BITBUCKET_PASSWORD) will be used.
+ - If not set the environment variable E(BITBUCKET_PASSWORD) will be used.
type: str
version_added: 4.0.0
notes:
diff --git a/ansible_collections/community/general/plugins/doc_fragments/consul.py b/ansible_collections/community/general/plugins/doc_fragments/consul.py
new file mode 100644
index 000000000..fbe3f33d4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/consul.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class ModuleDocFragment:
+ # Common parameters for Consul modules
+ DOCUMENTATION = r"""
+options:
+ host:
+ description:
+ - Host of the consul agent, defaults to V(localhost).
+ default: localhost
+ type: str
+ port:
+ type: int
+ description:
+ - The port on which the consul agent is running.
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ Defaults to V(http) and can be set to V(https) for secure connections.
+ default: http
+ type: str
+ validate_certs:
+ type: bool
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ default: true
+ ca_path:
+ description:
+ - The CA bundle to use for HTTPS connections.
+ type: str
+"""
+
+ TOKEN = r"""
+options:
+ token:
+ description:
+ - The token to use for authorization.
+ type: str
+"""
+
+ ACTIONGROUP_CONSUL = r"""
+options: {}
+attributes:
+ action_group:
+ description: Use C(group/community.general.consul) in C(module_defaults) to set defaults for this module.
+ support: full
+ membership:
+ - community.general.consul
+ version_added: 8.3.0
+"""
diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
index f8372431e..f4d624454 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
@@ -20,22 +20,22 @@ options:
region:
description:
- The target region.
- - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]
- - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html)
- - Note that the default value "na" stands for "North America".
- - The module prepends 'dd-' to the region choice.
+ - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py].
+ - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html).
+ - Note that the default value C(na) stands for "North America".
+ - The module prepends C(dd-) to the region choice.
type: str
default: na
mcp_user:
description:
- The username used to authenticate to the CloudControl API.
- - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata).
+ - If not specified, will fall back to the E(MCP_USER) environment variable or C(~/.dimensiondata).
type: str
mcp_password:
description:
- The password used to authenticate to the CloudControl API.
- - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
- - Required if I(mcp_user) is specified.
+ - If not specified, will fall back to the E(MCP_PASSWORD) environment variable or C(~/.dimensiondata).
+ - Required if O(mcp_user) is specified.
type: str
location:
description:
@@ -44,7 +44,7 @@ options:
required: true
validate_certs:
description:
- - If C(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates will not be validated.
- This should only be used on private instances of the CloudControl API that use self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
index d37152839..051d8ca1d 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
@@ -25,13 +25,13 @@ options:
wait_time:
description:
- The maximum amount of time (in seconds) to wait for the task to complete.
- - Only applicable if I(wait=true).
+ - Only applicable if O(wait=true).
type: int
default: 600
wait_poll_interval:
description:
- The amount of time (in seconds) to wait between checks for task completion.
- - Only applicable if I(wait=true).
+ - Only applicable if O(wait=true).
type: int
default: 2
- '''
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/emc.py b/ansible_collections/community/general/plugins/doc_fragments/emc.py
index e9e57a2c1..d685c510d 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/emc.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/emc.py
@@ -39,8 +39,7 @@ options:
default: sysadmin
requirements:
- An EMC VNX Storage device.
- - Ansible 2.7.
- - storops (0.5.10 or greater). Install using 'pip install storops'.
+ - storops (0.5.10 or greater). Install using C(pip install storops).
notes:
- - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform.
+ - The modules prefixed with C(emc_vnx) are built to support the EMC VNX storage platform.
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/gitlab.py b/ansible_collections/community/general/plugins/doc_fragments/gitlab.py
index 705a93c02..c6434c0ce 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/gitlab.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/gitlab.py
@@ -29,4 +29,9 @@ options:
- GitLab CI job token for logging in.
type: str
version_added: 4.2.0
+ ca_path:
+ description:
+ - The CA certificates bundle to use to verify GitLab server certificate.
+ type: str
+ version_added: 8.1.0
'''
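
The added ca_path option lets the GitLab modules validate the server certificate against a private CA bundle. A hedged sketch (community.general.gitlab_project is assumed here as one consumer of the fragment; URL, token, and paths are illustrative):

- name: Ensure a project exists on a GitLab server behind an internal CA
  community.general.gitlab_project:
    api_url: https://gitlab.internal.example.com
    api_token: "{{ gitlab_api_token }}"
    ca_path: /etc/ssl/certs/internal-ca.pem   # option added in community.general 8.1.0
    name: demo-project
    state: present
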
diff --git a/ansible_collections/community/general/plugins/doc_fragments/hwc.py b/ansible_collections/community/general/plugins/doc_fragments/hwc.py
index d3cebb6db..8b9ae92b8 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/hwc.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/hwc.py
@@ -19,8 +19,8 @@ options:
required: true
user:
description:
- - The user name to login with (currently only user names are
- supported, and not user IDs).
+ - The user name to login with.
+ - Currently only user names are supported, and not user IDs.
type: str
required: true
password:
@@ -31,14 +31,13 @@ options:
domain:
description:
- The name of the Domain to scope to (Identity v3).
- (currently only domain names are supported, and not domain IDs).
+ - Currently only domain names are supported, and not domain IDs.
type: str
required: true
project:
description:
- The name of the Tenant (Identity v2) or Project (Identity v3).
- (currently only project names are supported, and not
- project IDs).
+ - Currently only project names are supported, and not project IDs.
type: str
required: true
region:
@@ -47,20 +46,20 @@ options:
type: str
id:
description:
- - The id of resource to be managed.
+ - The ID of the resource to be managed.
type: str
notes:
- For authentication, you can set identity_endpoint using the
- C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable.
+ E(ANSIBLE_HWC_IDENTITY_ENDPOINT) environment variable.
- For authentication, you can set user using the
- C(ANSIBLE_HWC_USER) env variable.
- - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env
+ E(ANSIBLE_HWC_USER) environment variable.
+ - For authentication, you can set password using the E(ANSIBLE_HWC_PASSWORD) environment
variable.
- - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env
+ - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment
variable.
- - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env
+ - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment
variable.
- - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable.
+ - For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable.
- Environment variables values will only be used if the playbook values are
not set.
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py b/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
index ff38c3fc7..7783d9ca5 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
@@ -31,8 +31,7 @@ options:
required: true
notes:
- This module requires pyxcli python library.
- Use 'pip install pyxcli' in order to get pyxcli.
+ Use C(pip install pyxcli) in order to get pyxcli.
requirements:
- - python >= 2.7
- pyxcli
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/influxdb.py b/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
index 6aedd5ad3..fc0ca02ac 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
@@ -16,39 +16,36 @@ options:
hostname:
description:
- The hostname or IP address on which InfluxDB server is listening.
- - Since Ansible 2.5, defaulted to localhost.
type: str
default: localhost
username:
description:
- Username that will be used to authenticate against InfluxDB server.
- - Alias C(login_username) added in Ansible 2.5.
type: str
default: root
aliases: [ login_username ]
password:
description:
- Password that will be used to authenticate against InfluxDB server.
- - Alias C(login_password) added in Ansible 2.5.
type: str
default: root
aliases: [ login_password ]
port:
description:
- - The port on which InfluxDB server is listening
+ - The port on which InfluxDB server is listening.
type: int
default: 8086
path:
description:
- - The path on which InfluxDB server is accessible
- - Only available when using python-influxdb >= 5.1.0
+ - The path on which InfluxDB server is accessible.
+ - Only available when using python-influxdb >= 5.1.0.
type: str
default: ''
version_added: '0.2.0'
validate_certs:
description:
- - If set to C(false), the SSL certificates will not be validated.
- - This should only set to C(false) used on personally controlled sites using self-signed certificates.
+ - If set to V(false), the SSL certificates will not be validated.
+ - This should only be set to V(false) on personally controlled sites using self-signed certificates.
type: bool
default: true
ssl:
@@ -63,8 +60,8 @@ options:
retries:
description:
- Number of retries client will try before aborting.
- - C(0) indicates try until success.
- - Only available when using python-influxdb >= 4.1.0
+ - V(0) indicates try until success.
+ - Only available when using python-influxdb >= 4.1.0.
type: int
default: 3
use_udp:
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ipa.py b/ansible_collections/community/general/plugins/doc_fragments/ipa.py
index 5051c5539..7e091a94a 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/ipa.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/ipa.py
@@ -16,61 +16,56 @@ options:
ipa_port:
description:
- Port of FreeIPA / IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead.
- - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PORT) will be used instead.
+ - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set.
type: int
default: 443
ipa_host:
description:
- IP or hostname of IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead.
- - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
- - The relevant entry needed in FreeIPA is the 'ipa-ca' entry.
- - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - If the value is not specified in the task, the value of environment variable E(IPA_HOST) will be used instead.
+ - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
+ - The relevant entry needed in FreeIPA is the C(ipa-ca) entry.
+ - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default value will be used.
type: str
default: ipa.example.com
ipa_user:
description:
- Administrative account used on IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead.
- - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - If the value is not specified in the task, the value of environment variable E(IPA_USER) will be used instead.
+ - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set.
type: str
default: admin
ipa_pass:
description:
- Password of administrative user.
- - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead.
- - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
- - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server.
- - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate.
- - If GSSAPI is not available, the usage of 'ipa_pass' is required.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PASS) will be used instead.
+ - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
+ - If the environment variable E(KRB5CCNAME) is available, the module will use this Kerberos credentials cache to authenticate to the FreeIPA server.
+ - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not, the module will use this Kerberos keytab to authenticate.
+ - If GSSAPI is not available, the usage of O(ipa_pass) is required.
type: str
ipa_prot:
description:
- Protocol used by IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead.
- - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PROT) will be used instead.
+ - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set.
type: str
choices: [ http, https ]
default: https
validate_certs:
description:
- - This only applies if C(ipa_prot) is I(https).
- - If set to C(false), the SSL certificates will not be validated.
- - This should only set to C(false) used on personally controlled sites using self-signed certificates.
+ - This only applies if O(ipa_prot) is V(https).
+ - If set to V(false), the SSL certificates will not be validated.
+ - This should only be set to V(false) on personally controlled sites using self-signed certificates.
type: bool
default: true
ipa_timeout:
description:
- Specifies idle timeout (in seconds) for the connection.
- For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead.
- - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set.
+ - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) will be used instead.
+ - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is set.
type: int
default: 10
'''
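
The fragment now marks the fallback variables as environment variables (E(...)) and drops the obsolete "added in Ansible 2.5" remarks. A hedged sketch of passing the connection options explicitly instead of relying on E(IPA_HOST)/E(IPA_USER)/E(IPA_PASS) (community.general.ipa_user is assumed as one consumer of the fragment; values are illustrative):

- name: Ensure a FreeIPA user exists
  community.general.ipa_user:
    uid: jdoe
    givenname: John
    sn: Doe
    state: present
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: "{{ ipa_admin_password }}"
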
diff --git a/ansible_collections/community/general/plugins/doc_fragments/keycloak.py b/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
index 5d79fad7c..9b21ce52c 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
@@ -23,7 +23,7 @@ options:
auth_client_id:
description:
- - OpenID Connect I(client_id) to authenticate to the API with.
+ - OpenID Connect C(client_id) to authenticate to the API with.
type: str
default: admin-cli
@@ -34,7 +34,7 @@ options:
auth_client_secret:
description:
- - Client Secret to use in conjunction with I(auth_client_id) (if required).
+ - Client Secret to use in conjunction with O(auth_client_id) (if required).
type: str
auth_username:
@@ -69,6 +69,7 @@ options:
type: int
default: 10
version_added: 4.5.0
+
http_agent:
description:
- Configures the HTTP User-Agent header.
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ldap.py b/ansible_collections/community/general/plugins/doc_fragments/ldap.py
index b321c75eb..e11ab065d 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/ldap.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/ldap.py
@@ -21,7 +21,7 @@ options:
type: str
bind_pw:
description:
- - The password to use with I(bind_dn).
+ - The password to use with O(bind_dn).
type: str
default: ''
ca_path:
@@ -29,6 +29,18 @@ options:
- Set the path to PEM file with CA certs.
type: path
version_added: "6.5.0"
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if O(client_key) is defined.
+ version_added: "7.1.0"
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if O(client_cert) is defined.
+ version_added: "7.1.0"
dn:
required: true
description:
@@ -40,12 +52,12 @@ options:
type: str
description:
- Set the referrals chasing behavior.
- - C(anonymous) follow referrals anonymously. This is the default behavior.
- - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
+ - V(anonymous) follows referrals anonymously. This is the default behavior.
+ - V(disabled) disables referrals chasing. This sets C(OPT_REFERRALS) to off.
version_added: 2.0.0
server_uri:
description:
- - The I(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields.
+ - The O(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields.
- The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
- Note that when using multiple URIs you cannot determine to which URI your client gets connected.
- For URIs containing additional fields, particularly when using commas, behavior is undefined.
@@ -58,14 +70,13 @@ options:
default: false
validate_certs:
description:
- - If set to C(false), SSL certificates will not be validated.
+ - If set to V(false), SSL certificates will not be validated.
- This should only be used on sites using self-signed certificates.
type: bool
default: true
sasl_class:
description:
- The class to use for SASL authentication.
- - Possible choices are C(external), C(gssapi).
type: str
choices: ['external', 'gssapi']
default: external
@@ -73,10 +84,9 @@ options:
xorder_discovery:
description:
- Set the behavior on how to process Xordered DNs.
- - C(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
- - C(disable) will always use the DN unmodified (as passed by the I(dn) parameter).
- - C(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
- - Possible choices are C(enable), C(auto), C(disable).
+ - V(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
+ - V(disable) will always use the DN unmodified (as passed by the O(dn) parameter).
+ - V(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
type: str
choices: ['enable', 'auto', 'disable']
default: auto
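
The new client_cert and client_key options enable TLS client-certificate authentication, and each requires the other. A hedged sketch (community.general.ldap_entry is assumed as one consumer of the fragment; DN, URI, and file paths are illustrative):

- name: Create an OU while authenticating with a TLS client certificate
  community.general.ldap_entry:
    dn: ou=people,dc=example,dc=com
    objectClass: organizationalUnit
    server_uri: ldaps://ldap.example.com
    client_cert: /etc/ssl/ldap/client-chain.pem   # requires client_key as well (7.1.0+)
    client_key: /etc/ssl/ldap/client-key.pem
    state: present
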
diff --git a/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py b/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
index b5e7d7294..eed6727c2 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
@@ -30,7 +30,7 @@ options:
auth_url:
description:
- - lxca https full web address
+ - LXCA HTTPS full web address.
type: str
required: true
@@ -38,7 +38,6 @@ requirements:
- pylxca
notes:
- - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca)
- - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca)
- - Check mode is not supported.
+ - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca).
+ - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca).
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/manageiq.py b/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
index 030d68238..8afc183a5 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
@@ -21,30 +21,30 @@ options:
suboptions:
url:
description:
- - ManageIQ environment url. C(MIQ_URL) env var if set. otherwise, it is required to pass it.
+ - ManageIQ environment URL. E(MIQ_URL) environment variable if set. Otherwise, it is required to pass it.
type: str
required: false
username:
description:
- - ManageIQ username. C(MIQ_USERNAME) env var if set. otherwise, required if no token is passed in.
+ - ManageIQ username. E(MIQ_USERNAME) environment variable if set. Otherwise, required if no token is passed in.
type: str
password:
description:
- - ManageIQ password. C(MIQ_PASSWORD) env var if set. otherwise, required if no token is passed in.
+ - ManageIQ password. E(MIQ_PASSWORD) environment variable if set. Otherwise, required if no token is passed in.
type: str
token:
description:
- - ManageIQ token. C(MIQ_TOKEN) env var if set. otherwise, required if no username or password is passed in.
+ - ManageIQ token. E(MIQ_TOKEN) environment variable if set. Otherwise, required if no username or password is passed in.
type: str
validate_certs:
description:
- - Whether SSL certificates should be verified for HTTPS requests. defaults to True.
+ - Whether SSL certificates should be verified for HTTPS requests.
type: bool
default: true
aliases: [ verify_ssl ]
ca_cert:
description:
- - The path to a CA bundle file or directory with certificates. defaults to None.
+ - The path to a CA bundle file or directory with certificates.
type: str
aliases: [ ca_bundle_path ]
diff --git a/ansible_collections/community/general/plugins/doc_fragments/nomad.py b/ansible_collections/community/general/plugins/doc_fragments/nomad.py
index b19404e83..1571c211c 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/nomad.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/nomad.py
@@ -18,6 +18,12 @@ options:
- FQDN of Nomad server.
required: true
type: str
+ port:
+ description:
+ - Port of Nomad server.
+ type: int
+ default: 4646
+ version_added: 8.0.0
use_ssl:
description:
- Use TLS/SSL connection.
@@ -47,6 +53,6 @@ options:
type: str
token:
description:
- - ACL token for authentification.
+ - ACL token for authentication.
type: str
'''
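
The new port option (default 4646) makes the Nomad API port configurable instead of assumed. A hedged sketch (community.general.nomad_job_info is assumed as one consumer of the fragment; host, port, and token are illustrative):

- name: Query Nomad jobs on a non-default API port
  community.general.nomad_job_info:
    host: nomad.example.com
    port: 4647                       # option added in community.general 8.0.0
    use_ssl: true
    token: "{{ nomad_acl_token }}"
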
diff --git a/ansible_collections/community/general/plugins/doc_fragments/onepassword.py b/ansible_collections/community/general/plugins/doc_fragments/onepassword.py
new file mode 100644
index 000000000..4035f8179
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/onepassword.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r'''
+requirements:
+ - See U(https://support.1password.com/command-line/)
+options:
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ type: str
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ domain:
+ description: Domain of 1Password.
+ default: '1password.com'
+ type: str
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ type: str
+ account_id:
+ description: The account ID to target.
+ type: str
+ username:
+ description: The username used to sign in.
+ type: str
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ type: str
+ service_account_token:
+ description:
+ - The access key for a service account.
+ - Only works with 1Password CLI version 2 or later.
+ type: str
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ type: str
+ connect_host:
+ description: The host for 1Password Connect. Must be used in combination with O(connect_token).
+ type: str
+ env:
+ - name: OP_CONNECT_HOST
+ version_added: 8.1.0
+ connect_token:
+ description: The token for 1Password Connect. Must be used in combination with O(connect_host).
+ type: str
+ env:
+ - name: OP_CONNECT_TOKEN
+ version_added: 8.1.0
+'''
+
+ LOOKUP = r'''
+options:
+ service_account_token:
+ env:
+ - name: OP_SERVICE_ACCOUNT_TOKEN
+ version_added: 8.2.0
+notes:
+ - This lookup will use an existing 1Password session if one exists. If not, and you have already
+ performed an initial sign in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the
+ O(master_password) is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
+ - This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password).
+ - Can target a specific account by providing the O(account_id).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
+ needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
+ to the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts.
+ Facts are subject to caching if enabled, which means this data could be stored in clear text
+ on disk or in a database.
+ - Tested with C(op) version 2.7.2.
+'''
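
The new fragment gathers the options shared by the 1Password plugins, including the Connect (connect_host/connect_token) and service account paths. A hedged lookup sketch (item, vault, host, and the op_connect_token variable are illustrative):

- name: Read a secret through 1Password Connect
  ansible.builtin.debug:
    msg: >-
      {{ lookup('community.general.onepassword', 'Database root',
                vault='Infrastructure',
                connect_host='https://op-connect.example.com',
                connect_token=op_connect_token) }}
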
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oneview.py b/ansible_collections/community/general/plugins/doc_fragments/oneview.py
index 54288e51f..a88226d7d 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/oneview.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/oneview.py
@@ -15,7 +15,7 @@ class ModuleDocFragment(object):
options:
config:
description:
- - Path to a .json configuration file containing the OneView client configuration.
+ - Path to a JSON configuration file containing the OneView client configuration.
The configuration file is optional and when used should be present in the host running the ansible commands.
If the file path is not provided, the configuration will be loaded from environment variables.
For links to example configuration files or how to use the environment variables verify the notes section.
@@ -42,7 +42,7 @@ options:
type: str
requirements:
- - python >= 2.7.9
+ - Python >= 2.7.9
notes:
- "A sample configuration file for the config parameter can be found at:
@@ -70,11 +70,11 @@ options:
options:
params:
description:
- - List of params to delimit, filter and sort the list of resources.
- - "params allowed:
- - C(start): The first item to return, using 0-based indexing.
- - C(count): The number of resources to return.
- - C(filter): A general filter/query string to narrow the list of items returned.
- - C(sort): The sort order of the returned data set."
+ - List of parameters to delimit, filter and sort the list of resources.
+ - "Parameter keys allowed are:"
+ - "C(start): The first item to return, using 0-based indexing."
+ - "C(count): The number of resources to return."
+ - "C(filter): A general filter/query string to narrow the list of items returned."
+ - "C(sort): The sort order of the returned data set."
type: dict
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/online.py b/ansible_collections/community/general/plugins/doc_fragments/online.py
index d7e13765b..37e39cfa2 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/online.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/online.py
@@ -20,7 +20,7 @@ options:
aliases: [ oauth_token ]
api_url:
description:
- - Online API URL
+ - Online API URL.
type: str
default: 'https://api.online.net'
aliases: [ base_url ]
@@ -36,10 +36,10 @@ options:
type: bool
default: true
notes:
- - Also see the API documentation on U(https://console.online.net/en/api/)
- - If C(api_token) is not set within the module, the following
+ - Also see the API documentation on U(https://console.online.net/en/api/).
+ - If O(api_token) is not set within the module, the following
environment variables can be used in decreasing order of precedence
- C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
- - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
+ E(ONLINE_TOKEN), E(ONLINE_API_KEY), E(ONLINE_OAUTH_TOKEN), E(ONLINE_API_TOKEN).
+ - If one wants to use a different O(api_url), one can also set the E(ONLINE_API_URL)
environment variable.
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/opennebula.py b/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
index 0fc323271..567faf1a7 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
@@ -15,26 +15,26 @@ options:
api_url:
description:
- The ENDPOINT URL of the XMLRPC server.
- - If not specified then the value of the ONE_URL environment variable, if any, is used.
+ - If not specified then the value of the E(ONE_URL) environment variable, if any, is used.
type: str
aliases:
- api_endpoint
api_username:
description:
- The name of the user for XMLRPC authentication.
- - If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
+ - If not specified then the value of the E(ONE_USERNAME) environment variable, if any, is used.
type: str
api_password:
description:
- The password or token for XMLRPC authentication.
- - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
+ - If not specified then the value of the E(ONE_PASSWORD) environment variable, if any, is used.
type: str
aliases:
- api_token
validate_certs:
description:
- - Whether to validate the SSL certificates or not.
- - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used.
+ - Whether to validate the TLS/SSL certificates or not.
+ - This parameter is ignored if the E(PYTHONHTTPSVERIFY) environment variable is used.
type: bool
default: true
wait_timeout:
diff --git a/ansible_collections/community/general/plugins/doc_fragments/openswitch.py b/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
index 9d5f0be74..a203a3b40 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
@@ -23,7 +23,7 @@ options:
port:
description:
- Specifies the port to use when building the connection to the remote
- device. This value applies to either I(cli) or I(rest). The port
+ device. This value applies to either O(transport=cli) or O(transport=rest). The port
value will default to the appropriate transport common port if
none is provided in the task. (cli=22, http=80, https=443). Note
this argument does not affect the SSH transport.
@@ -36,15 +36,15 @@ options:
either the CLI login or the eAPI authentication depending on which
transport is used. Note this argument does not affect the SSH
transport. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+ environment variable E(ANSIBLE_NET_USERNAME) will be used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to
- the remote device. This is a common argument used for either I(cli)
- or I(rest) transports. Note this argument does not affect the SSH
+ the remote device. This is a common argument used for either O(transport=cli)
+ or O(transport=rest). Note this argument does not affect the SSH
transport. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+ environment variable E(ANSIBLE_NET_PASSWORD) will be used instead.
type: str
timeout:
description:
@@ -56,29 +56,29 @@ options:
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
- the remote device. This argument is only used for the I(cli)
- transports. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+ the remote device. This argument is only used for O(transport=cli).
+ If the value is not specified in the task, the value of
+ environment variable E(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
type: path
transport:
description:
- Configures the transport connection to use when connecting to the
remote device. The transport argument supports connectivity to the
- device over ssh, cli or REST.
+ device over SSH (V(ssh)), CLI (V(cli)), or REST (V(rest)).
required: true
type: str
choices: [ cli, rest, ssh ]
default: ssh
use_ssl:
description:
- - Configures the I(transport) to use SSL if set to C(true) only when the
- I(transport) argument is configured as rest. If the transport
- argument is not I(rest), this value is ignored.
+ - Configures the O(transport) to use SSL if set to V(true) only when the
+ O(transport) argument is configured as V(rest). If the transport
+ argument is not V(rest), this value is ignored.
type: bool
default: true
provider:
description:
- - Convenience method that allows all I(openswitch) arguments to be passed as
+ - Convenience method that allows all C(openswitch) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
type: dict
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle.py b/ansible_collections/community/general/plugins/doc_fragments/oracle.py
index 9ca4706ba..ff0ed2fd5 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle.py
@@ -10,36 +10,35 @@ __metaclass__ = type
class ModuleDocFragment(object):
DOCUMENTATION = """
requirements:
- - "python >= 2.7"
- - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
+ - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
notes:
- - For OCI python sdk configuration, please refer to
- U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html)
+ - For OCI Python SDK configuration, please refer to
+ U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html).
options:
config_file_location:
description:
- - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable,
- if any, is used. Otherwise, defaults to ~/.oci/config.
+ - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable,
+ if any, is used. Otherwise, defaults to C(~/.oci/config).
type: str
config_profile_name:
description:
- - The profile to load from the config file referenced by C(config_file_location). If not set, then the
- value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the
- "DEFAULT" profile in C(config_file_location).
+ - The profile to load from the config file referenced by O(config_file_location). If not set, then the
+ value of the E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the
+ C(DEFAULT) profile in O(config_file_location).
default: "DEFAULT"
type: str
api_user:
description:
- The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the
- value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user
- is not specified through a configuration file (See C(config_file_location)). To get the user's OCID,
+ value of the E(OCI_USER_OCID) environment variable, if any, is used. This option is required if the user
+ is not specified through a configuration file (See O(config_file_location)). To get the user's OCID,
please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
type: str
api_user_fingerprint:
description:
- - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
+ - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT)
environment variable, if any, is used. This option is required if the key fingerprint is not
- specified through a configuration file (See C(config_file_location)). To get the key pair's
+ specified through a configuration file (See O(config_file_location)). To get the key pair's
fingerprint value please refer
U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
type: str
@@ -47,21 +46,21 @@ class ModuleDocFragment(object):
description:
- Full path and filename of the private key (in PEM format). If not set, then the value of the
OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
- not specified through a configuration file (See C(config_file_location)). If the key is encrypted
- with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
+ not specified through a configuration file (See O(config_file_location)). If the key is encrypted
+ with a pass-phrase, the O(api_user_key_pass_phrase) option must also be provided.
type: path
api_user_key_pass_phrase:
description:
- - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
+ - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then
the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
- key passphrase is not specified through a configuration file (See C(config_file_location)).
+ key passphrase is not specified through a configuration file (See O(config_file_location)).
type: str
auth_type:
description:
- - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
- authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
+ - The type of authentication to use for making API requests. By default O(auth_type=api_key) based
+ authentication is performed and the API key (see O(api_user_key_file)) in your config file will be
used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE,
- if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication
+ if any, is used. Use O(auth_type=instance_principal) to use instance principal based authentication
when running ansible playbooks within an OCI compute instance.
choices: ['api_key', 'instance_principal']
default: 'api_key'
@@ -70,14 +69,14 @@ class ModuleDocFragment(object):
description:
- OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
used. This option is required if the tenancy OCID is not specified through a configuration file
- (See C(config_file_location)). To get the tenancy OCID, please refer
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
+ (See O(config_file_location)). To get the tenancy OCID, please refer to
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
type: str
region:
description:
- The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
value of the OCI_REGION variable, if any, is used. This option is required if the region is
- not specified through a configuration file (See C(config_file_location)). Please refer to
+ not specified through a configuration file (See O(config_file_location)). Please refer to
U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
on OCI regions.
type: str
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
index 529381919..9d2cc07c9 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
@@ -14,13 +14,13 @@ class ModuleDocFragment(object):
description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an
idempotent operation, and doesn't create the resource if it already exists. Setting this option
to true, forcefully creates a copy of the resource, even if it already exists.This option is
- mutually exclusive with I(key_by).
+ mutually exclusive with O(key_by).
default: false
type: bool
key_by:
description: The list of comma-separated attributes of this resource which should be used to uniquely
identify an instance of the resource. By default, all the attributes of a resource except
- I(freeform_tags) are used to uniquely identify a resource.
+ O(freeform_tags) are used to uniquely identify a resource.
type: list
elements: str
"""
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
index eae5f4459..b6bc0f229 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
@@ -11,7 +11,7 @@ class ModuleDocFragment(object):
DOCUMENTATION = """
options:
display_name:
- description: Use I(display_name) along with the other options to return only resources that match the given
+ description: Use O(display_name) along with the other options to return only resources that match the given
display name exactly.
type: str
"""
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
index 362071f94..523eed702 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
@@ -11,7 +11,7 @@ class ModuleDocFragment(object):
DOCUMENTATION = """
options:
name:
- description: Use I(name) along with the other options to return only resources that match the given name
+ description: Use O(name) along with the other options to return only resources that match the given name
exactly.
type: str
"""
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
index ce7ea776e..0ba253232 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
@@ -15,13 +15,13 @@ class ModuleDocFragment(object):
default: true
type: bool
wait_timeout:
- description: Time, in seconds, to wait when I(wait=true).
+ description: Time, in seconds, to wait when O(wait=true).
default: 1200
type: int
wait_until:
- description: The lifecycle state to wait for the resource to transition into when I(wait=true). By default,
- when I(wait=true), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/
- RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/
+ description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default,
+ when O(wait=true), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/
+ RUNNING applicable lifecycle state during create operation and to get into DELETED/DETACHED/
TERMINATED lifecycle state during delete operation.
type: str
"""
diff --git a/ansible_collections/community/general/plugins/doc_fragments/pritunl.py b/ansible_collections/community/general/plugins/doc_fragments/pritunl.py
index 51ab979b5..396ee0866 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/pritunl.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/pritunl.py
@@ -38,7 +38,7 @@ options:
default: true
description:
- If certificates should be validated or not.
- - This should never be set to C(false), except if you are very sure that
+ - This should never be set to V(false), except if you are very sure that
your connection to the server can not be subject to a Man In The Middle
attack.
"""
diff --git a/ansible_collections/community/general/plugins/doc_fragments/proxmox.py b/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
index e39af4f3a..4972da498 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
@@ -24,21 +24,23 @@ options:
api_password:
description:
- Specify the password to authenticate with.
- - You can use C(PROXMOX_PASSWORD) environment variable.
+ - You can use the E(PROXMOX_PASSWORD) environment variable.
type: str
api_token_id:
description:
- Specify the token ID.
+ - Requires C(proxmoxer>=1.1.0) to work.
type: str
version_added: 1.3.0
api_token_secret:
description:
- Specify the token secret.
+ - Requires C(proxmoxer>=1.1.0) to work.
type: str
version_added: 1.3.0
validate_certs:
description:
- - If C(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: false
@@ -55,7 +57,7 @@ options:
node:
description:
- Proxmox VE node on which to operate.
- - Only required for I(state=present).
+ - Only required for O(state=present).
- For every other states it will be autodiscovered.
type: str
pool:
diff --git a/ansible_collections/community/general/plugins/doc_fragments/purestorage.py b/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
index 8db8c3b3d..823397763 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
@@ -32,11 +32,10 @@ options:
- FlashBlade API token for admin privileged user.
type: str
notes:
- - This module requires the C(purity_fb) Python library
- - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
- if I(fb_url) and I(api_token) arguments are not passed to the module directly
+ - This module requires the C(purity_fb) Python library.
+ - You must set E(PUREFB_URL) and E(PUREFB_API) environment variables
+ if O(fb_url) and O(api_token) arguments are not passed to the module directly.
requirements:
- - python >= 2.7
- purity_fb >= 1.1
'''
@@ -54,10 +53,9 @@ options:
type: str
required: true
notes:
- - This module requires the C(purestorage) Python library
- - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
- if I(fa_url) and I(api_token) arguments are not passed to the module directly
+ - This module requires the C(purestorage) Python library.
+ - You must set E(PUREFA_URL) and E(PUREFA_API) environment variables
+ if O(fa_url) and O(api_token) arguments are not passed to the module directly.
requirements:
- - python >= 2.7
- purestorage
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/rackspace.py b/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
index 9e2231602..f28be777c 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
@@ -15,18 +15,18 @@ class ModuleDocFragment(object):
options:
api_key:
description:
- - Rackspace API key, overrides I(credentials).
+ - Rackspace API key, overrides O(credentials).
type: str
aliases: [ password ]
credentials:
description:
- - File to find the Rackspace credentials in. Ignored if I(api_key) and
- I(username) are provided.
+ - File to find the Rackspace credentials in. Ignored if O(api_key) and
+ O(username) are provided.
type: path
aliases: [ creds_file ]
env:
description:
- - Environment as configured in I(~/.pyrax.cfg),
+ - Environment as configured in C(~/.pyrax.cfg),
see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
type: str
region:
@@ -35,7 +35,7 @@ options:
type: str
username:
description:
- - Rackspace username, overrides I(credentials).
+ - Rackspace username, overrides O(credentials).
type: str
validate_certs:
description:
@@ -43,15 +43,14 @@ options:
type: bool
aliases: [ verify_ssl ]
requirements:
- - python >= 2.6
- pyrax
notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+ - The following environment variables can be used, E(RAX_USERNAME),
+ E(RAX_API_KEY), E(RAX_CREDS_FILE), E(RAX_CREDENTIALS), E(RAX_REGION).
+ - E(RAX_CREDENTIALS) and E(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating).
+ - E(RAX_USERNAME) and E(RAX_API_KEY) obviate the use of a credentials file.
+ - E(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...).
'''
# Documentation fragment including attributes to enable communication
@@ -61,23 +60,23 @@ options:
api_key:
type: str
description:
- - Rackspace API key, overrides I(credentials).
+ - Rackspace API key, overrides O(credentials).
aliases: [ password ]
auth_endpoint:
type: str
description:
- The URI of the authentication service.
- - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/)
+ - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/).
credentials:
type: path
description:
- - File to find the Rackspace credentials in. Ignored if I(api_key) and
- I(username) are provided.
+ - File to find the Rackspace credentials in. Ignored if O(api_key) and
+ O(username) are provided.
aliases: [ creds_file ]
env:
type: str
description:
- - Environment as configured in I(~/.pyrax.cfg),
+ - Environment as configured in C(~/.pyrax.cfg),
see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
identity_type:
type: str
@@ -99,20 +98,23 @@ options:
username:
type: str
description:
- - Rackspace username, overrides I(credentials).
+ - Rackspace username, overrides O(credentials).
validate_certs:
description:
- Whether or not to require SSL validation of API endpoints.
type: bool
aliases: [ verify_ssl ]
+deprecated:
+ removed_in: 9.0.0
+ why: This module relies on the deprecated package pyrax.
+ alternative: Use the Openstack modules instead.
requirements:
- - python >= 2.6
- pyrax
notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+ - The following environment variables can be used, E(RAX_USERNAME),
+ E(RAX_API_KEY), E(RAX_CREDS_FILE), E(RAX_CREDENTIALS), E(RAX_REGION).
+ - E(RAX_CREDENTIALS) and E(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating).
+ - E(RAX_USERNAME) and E(RAX_API_KEY) obviate the use of a credentials file.
+ - E(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...).
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/redis.py b/ansible_collections/community/general/plugins/doc_fragments/redis.py
index 2d4033051..fafb52c86 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/redis.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/redis.py
@@ -46,8 +46,8 @@ options:
default: true
ca_certs:
description:
- - Path to root certificates file. If not set and I(tls) is
- set to C(true), certifi ca-certificates will be used.
+ - Path to root certificates file. If not set and O(tls) is
+ set to V(true), certifi ca-certificates will be used.
type: str
requirements: [ "redis", "certifi" ]
diff --git a/ansible_collections/community/general/plugins/doc_fragments/scaleway.py b/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
index b08d11dbb..bdb0dd056 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
@@ -42,10 +42,10 @@ options:
type: bool
default: true
notes:
- - Also see the API documentation on U(https://developer.scaleway.com/)
- - If C(api_token) is not set within the module, the following
+ - Also see the API documentation on U(https://developer.scaleway.com/).
+ - If O(api_token) is not set within the module, the following
environment variables can be used in decreasing order of precedence
- C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN).
- - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL)
+ E(SCW_TOKEN), E(SCW_API_KEY), E(SCW_OAUTH_TOKEN) or E(SCW_API_TOKEN).
+ - If one wants to use a different O(api_url), one can also set the E(SCW_API_URL)
environment variable.
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/utm.py b/ansible_collections/community/general/plugins/doc_fragments/utm.py
index 73ad80503..3e0bc6e10 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/utm.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/utm.py
@@ -14,7 +14,7 @@ options:
headers:
description:
- A dictionary of additional headers to be sent to POST and PUT requests.
- - Is needed for some modules
+ - Is needed for some modules.
type: dict
required: false
default: {}
@@ -30,8 +30,9 @@ options:
default: 4444
utm_token:
description:
- - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\
- PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2."
+ - "The token used to identify at the REST-API. See
+ U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en),
+ Chapter 2.4.2."
type: str
required: true
utm_protocol:
@@ -48,8 +49,8 @@ options:
state:
description:
- The desired state of the object.
- - C(present) will create or update an object
- - C(absent) will delete an object if it was present
+ - V(present) will create or update an object.
+ - V(absent) will delete an object if it was present.
type: str
choices: [ absent, present ]
default: present
diff --git a/ansible_collections/community/general/plugins/doc_fragments/vexata.py b/ansible_collections/community/general/plugins/doc_fragments/vexata.py
index ff79613ee..041f404d2 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/vexata.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/vexata.py
@@ -30,17 +30,19 @@ options:
user:
description:
- Vexata API user with administrative privileges.
+ - Uses the E(VEXATA_USER) environment variable as a fallback.
required: false
type: str
password:
description:
- Vexata API user password.
+ - Uses the E(VEXATA_PASSWORD) environment variable as a fallback.
required: false
type: str
validate_certs:
description:
- - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
- - If set to C(true), please make sure Python >= 2.7.9 is installed on the given machine.
+ - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted.
+ - If set to V(true), please make sure Python >= 2.7.9 is installed on the given machine.
required: false
type: bool
default: false
@@ -48,7 +50,6 @@ options:
requirements:
- Vexata VX100 storage array with VXOS >= v3.5.0 on storage array
- vexatapi >= 0.0.1
- - python >= 2.7
- - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if
+ - E(VEXATA_USER) and E(VEXATA_PASSWORD) environment variables must be set if
user and password arguments are not passed to the module directly.
'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/xenserver.py b/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
index eaee17384..681d959fa 100644
--- a/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
+++ b/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
@@ -15,27 +15,27 @@ options:
hostname:
description:
- The hostname or IP address of the XenServer host or XenServer pool master.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) will be used instead.
type: str
default: localhost
aliases: [ host, pool ]
username:
description:
- The username to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) will be used instead.
type: str
default: root
aliases: [ admin, user ]
password:
description:
- The password to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) will be used instead.
type: str
aliases: [ pass, pwd ]
validate_certs:
description:
- - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead.
+ - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) will be used instead.
type: bool
default: true
'''
diff --git a/ansible_collections/community/general/plugins/filter/from_csv.py b/ansible_collections/community/general/plugins/filter/from_csv.py
index 6472b67b1..310138d49 100644
--- a/ansible_collections/community/general/plugins/filter/from_csv.py
+++ b/ansible_collections/community/general/plugins/filter/from_csv.py
@@ -23,7 +23,7 @@ DOCUMENTATION = '''
dialect:
description:
- The CSV dialect to use when parsing the CSV file.
- - Possible values include C(excel), C(excel-tab) or C(unix).
+ - Possible values include V(excel), V(excel-tab) or V(unix).
type: str
default: excel
fieldnames:
@@ -35,19 +35,19 @@ DOCUMENTATION = '''
delimiter:
description:
- A one-character string used to separate fields.
- - When using this parameter, you change the default value used by I(dialect).
+ - When using this parameter, you change the default value used by O(dialect).
- The default value depends on the dialect used.
type: str
skipinitialspace:
description:
- Whether to ignore any whitespaces immediately following the delimiter.
- - When using this parameter, you change the default value used by I(dialect).
+ - When using this parameter, you change the default value used by O(dialect).
- The default value depends on the dialect used.
type: bool
strict:
description:
- Whether to raise an exception on bad CSV input.
- - When using this parameter, you change the default value used by I(dialect).
+ - When using this parameter, you change the default value used by O(dialect).
- The default value depends on the dialect used.
type: bool
'''
@@ -56,7 +56,7 @@ EXAMPLES = '''
- name: Parse a CSV file's contents
ansible.builtin.debug:
msg: >-
- {{ csv_data | community.genera.from_csv(dialect='unix') }}
+ {{ csv_data | community.general.from_csv(dialect='unix') }}
vars:
csv_data: |
Column 1,Value
diff --git a/ansible_collections/community/general/plugins/filter/from_ini.py b/ansible_collections/community/general/plugins/filter/from_ini.py
new file mode 100644
index 000000000..d68b51092
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/from_ini.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Steffen Scheib <steffen@scheib.me>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+DOCUMENTATION = r'''
+ name: from_ini
+ short_description: Converts INI text input into a dictionary
+ version_added: 8.2.0
+ author: Steffen Scheib (@sscheib)
+ description:
+ - Converts INI text input into a dictionary.
+ options:
+ _input:
+ description: A string containing an INI document.
+ type: string
+ required: true
+'''
+
+EXAMPLES = r'''
+ - name: Slurp an INI file
+ ansible.builtin.slurp:
+ src: /etc/rhsm/rhsm.conf
+ register: rhsm_conf
+
+ - name: Display the INI file as dictionary
+ ansible.builtin.debug:
+ var: rhsm_conf.content | b64decode | community.general.from_ini
+
+ - name: Set a new dictionary fact with the contents of the INI file
+ ansible.builtin.set_fact:
+ rhsm_dict: >-
+ {{
+ rhsm_conf.content | b64decode | community.general.from_ini
+ }}
+'''
+
+RETURN = '''
+ _value:
+ description: A dictionary representing the INI file.
+ type: dictionary
+'''
+
+__metaclass__ = type
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves import StringIO
+from ansible.module_utils.six.moves.configparser import ConfigParser
+from ansible.module_utils.common.text.converters import to_native
+
+
+class IniParser(ConfigParser):
+ ''' Implements a configparser which is able to return a dict '''
+
+ def __init__(self):
+ super().__init__()
+ self.optionxform = str
+
+ def as_dict(self):
+ d = dict(self._sections)
+ for k in d:
+ d[k] = dict(self._defaults, **d[k])
+ d[k].pop('__name__', None)
+
+ if self._defaults:
+ d['DEFAULT'] = dict(self._defaults)
+
+ return d
+
+
+def from_ini(obj):
+ ''' Read the given string as INI file and return a dict '''
+
+ if not isinstance(obj, string_types):
+ raise AnsibleFilterError(f'from_ini requires a str, got {type(obj)}')
+
+ parser = IniParser()
+
+ try:
+ parser.read_file(StringIO(obj))
+ except Exception as ex:
+ raise AnsibleFilterError(f'from_ini failed to parse given string: '
+ f'{to_native(ex)}', orig_exc=ex)
+
+ return parser.as_dict()
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+
+ return {
+ 'from_ini': from_ini
+ }
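For a quick, standalone look at what the new from_ini filter produces, the sketch below mirrors its ConfigParser-based conversion outside of Ansible; the sample INI text and section names are made up for illustration and are not part of the plugin.

# Minimal sketch of the from_ini conversion, run outside Ansible.
# The sample INI content below is illustrative only.
from configparser import ConfigParser
from io import StringIO

SAMPLE = """\
[server]
hostname = subscription.example.com
port = 443
"""

parser = ConfigParser()
parser.optionxform = str  # keep option names case-sensitive, as the filter does
parser.read_file(StringIO(SAMPLE))

# Build a plain dict of sections, analogous to IniParser.as_dict()
result = {section: dict(parser.items(section)) for section in parser.sections()}
print(result)  # {'server': {'hostname': 'subscription.example.com', 'port': '443'}}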
diff --git a/ansible_collections/community/general/plugins/filter/jc.py b/ansible_collections/community/general/plugins/filter/jc.py
index 3aa8d20a5..2fe3ef9d7 100644
--- a/ansible_collections/community/general/plugins/filter/jc.py
+++ b/ansible_collections/community/general/plugins/filter/jc.py
@@ -25,17 +25,17 @@ DOCUMENTATION = '''
parser:
description:
- The correct parser for the input data.
- - For example C(ifconfig).
+ - For example V(ifconfig).
- "Note: use underscores instead of dashes (if any) in the parser module name."
- See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
type: string
required: true
quiet:
- description: Set to C(false) to not suppress warnings.
+ description: Set to V(false) to not suppress warnings.
type: boolean
default: true
raw:
- description: Set to C(true) to return pre-processed JSON.
+ description: Set to V(true) to return pre-processed JSON.
type: boolean
default: false
requirements:
diff --git a/ansible_collections/community/general/plugins/filter/lists.py b/ansible_collections/community/general/plugins/filter/lists.py
new file mode 100644
index 000000000..d16f955c2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/lists.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common.collections import is_sequence
+
+
+def remove_duplicates(lst):
+ seen = set()
+ seen_add = seen.add
+ result = []
+ for item in lst:
+ try:
+ if item not in seen:
+ seen_add(item)
+ result.append(item)
+ except TypeError:
+ # This happens for unhashable values `item`. If this happens,
+ # convert `seen` to a list and continue.
+ seen = list(seen)
+ seen_add = seen.append
+ if item not in seen:
+ seen_add(item)
+ result.append(item)
+ return result
+
+
+def flatten_list(lst):
+ result = []
+ for sublist in lst:
+ if not is_sequence(sublist):
+ msg = ("All arguments must be lists. %s is %s")
+ raise AnsibleFilterError(msg % (sublist, type(sublist)))
+ if len(sublist) > 0:
+ if all(is_sequence(sub) for sub in sublist):
+ for item in sublist:
+ result.append(item)
+ else:
+ result.append(sublist)
+ return result
+
+
+def lists_union(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ "lists_union() got unexpected keywords arguments: {0}".format(
+ ", ".join(kwargs.keys())
+ )
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = lists[0]
+ for b in lists[1:]:
+ a = do_union(a, b)
+ return remove_duplicates(a)
+
+
+def do_union(a, b):
+ return a + b
+
+
+def lists_intersect(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ "lists_intersect() got unexpected keywords arguments: {0}".format(
+ ", ".join(kwargs.keys())
+ )
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = remove_duplicates(lists[0])
+ for b in lists[1:]:
+ a = do_intersect(a, b)
+ return a
+
+
+def do_intersect(a, b):
+ isect = []
+ try:
+ other = set(b)
+ isect = [item for item in a if item in other]
+ except TypeError:
+ # This happens for unhashable values,
+ # use a list instead and redo.
+ other = list(b)
+ isect = [item for item in a if item in other]
+ return isect
+
+
+def lists_difference(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ "lists_difference() got unexpected keywords arguments: {0}".format(
+ ", ".join(kwargs.keys())
+ )
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = remove_duplicates(lists[0])
+ for b in lists[1:]:
+ a = do_difference(a, b)
+ return a
+
+
+def do_difference(a, b):
+ diff = []
+ try:
+ other = set(b)
+ diff = [item for item in a if item not in other]
+ except TypeError:
+ # This happens for unhashable values,
+ # use a list instead and redo.
+ other = list(b)
+ diff = [item for item in a if item not in other]
+ return diff
+
+
+def lists_symmetric_difference(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ "lists_difference() got unexpected keywords arguments: {0}".format(
+ ", ".join(kwargs.keys())
+ )
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = lists[0]
+ for b in lists[1:]:
+ a = do_symmetric_difference(a, b)
+ return a
+
+
+def do_symmetric_difference(a, b):
+ sym_diff = []
+ union = lists_union(a, b)
+ try:
+ isect = set(a) & set(b)
+ sym_diff = [item for item in union if item not in isect]
+ except TypeError:
+ # This happens for unhashable values,
+ # build the intersection of `a` and `b` backed
+ # by a list instead of a set and redo.
+ isect = lists_intersect(a, b)
+ sym_diff = [item for item in union if item not in isect]
+ return sym_diff
+
+
+class FilterModule(object):
+ ''' Ansible lists jinja2 filters '''
+
+ def filters(self):
+ return {
+ 'lists_union': lists_union,
+ 'lists_intersect': lists_intersect,
+ 'lists_difference': lists_difference,
+ 'lists_symmetric_difference': lists_symmetric_difference,
+ }
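The documentation examples in the YAML files that follow can be sanity-checked with plain Python; the helper below re-implements the same order-preserving semantics for illustration only and is not the collection's code.

# Order-preserving equivalents of the new list filters, for a quick check.
# This is an illustrative re-implementation, not the plugin code itself.
def unique(seq):
    """Return seq with duplicates removed, keeping the original order."""
    seen = []
    out = []
    for item in seq:
        if item not in seen:
            seen.append(item)
            out.append(item)
    return out

list1 = [1, 2, 5, 3, 4, 10]
list2 = [1, 2, 3, 4, 5, 11, 99]

union = unique(list1 + list2)                                   # lists_union
intersect = [x for x in unique(list1) if x in list2]            # lists_intersect
difference = [x for x in unique(list1) if x not in list2]       # lists_difference
symmetric = [x for x in union if (x in list1) != (x in list2)]  # lists_symmetric_difference

print(union)       # [1, 2, 5, 3, 4, 10, 11, 99]
print(intersect)   # [1, 2, 5, 3, 4]
print(difference)  # [10]
print(symmetric)   # [10, 11, 99]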
diff --git a/ansible_collections/community/general/plugins/filter/lists_difference.yml b/ansible_collections/community/general/plugins/filter/lists_difference.yml
new file mode 100644
index 000000000..9806a9f0b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/lists_difference.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_difference
+ short_description: Difference of lists with a predictable order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list of all the elements from the first list which do not appear in the other lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the difference of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_difference(list2) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [10]
+
+ - name: Return the difference of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => []
+
+RETURN:
+ _value:
+ description: A unique list of all the elements from the first list that do not appear in the other lists.
+ type: list
+ elements: any
diff --git a/ansible_collections/community/general/plugins/filter/lists_intersect.yml b/ansible_collections/community/general/plugins/filter/lists_intersect.yml
new file mode 100644
index 000000000..8253463de
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/lists_intersect.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_intersect
+ short_description: Intersection of lists with a predictable order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list of all the common elements of two or more lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the intersection of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_intersect(list2) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [1, 2, 5, 3, 4]
+
+ - name: Return the intersection of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => [1, 2, 5, 3, 4]
+
+RETURN:
+ _value:
+ description: A unique list of all the common elements from the provided lists.
+ type: list
+ elements: any
diff --git a/ansible_collections/community/general/plugins/filter/lists_mergeby.py b/ansible_collections/community/general/plugins/filter/lists_mergeby.py
index 036dfe4d7..caf183492 100644
--- a/ansible_collections/community/general/plugins/filter/lists_mergeby.py
+++ b/ansible_collections/community/general/plugins/filter/lists_mergeby.py
@@ -12,9 +12,9 @@ DOCUMENTATION = '''
version_added: 2.0.0
author: Vladimir Botka (@vbotka)
description:
- - Merge two or more lists by attribute I(index). Optional parameters 'recursive' and 'list_merge'
+ - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge)
control the merging of the lists in values. The function merge_hash from ansible.utils.vars
- is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see
+ is used. For details on how to use the parameters O(recursive) and O(list_merge), see
Ansible User's Guide chapter "Using filters to manipulate data" section "Combining
hashes/dictionaries".
positional: another_list, index
diff --git a/ansible_collections/community/general/plugins/filter/lists_symmetric_difference.yml b/ansible_collections/community/general/plugins/filter/lists_symmetric_difference.yml
new file mode 100644
index 000000000..d985704c2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/lists_symmetric_difference.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_symmetric_difference
+ short_description: Symmetric difference of lists with a predictable order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list containing the symmetric difference of two or more lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the symmetric difference of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_symmetric_difference(list2) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [10, 11, 99]
+
+ - name: Return the symmetric difference of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => [11, 1, 2, 3, 4, 5, 101]
+
+RETURN:
+ _value:
+ description: A unique list containing the symmetric difference of two or more lists.
+ type: list
+ elements: any
diff --git a/ansible_collections/community/general/plugins/filter/lists_union.yml b/ansible_collections/community/general/plugins/filter/lists_union.yml
new file mode 100644
index 000000000..ba6909083
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/lists_union.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_union
+ short_description: Union of lists with a predictable order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list of all the elements of two or more lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the union of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_union(list2, list3) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => [1, 2, 5, 3, 4, 10, 11, 99, 101]
+
+ - name: Return the union of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [1, 2, 5, 3, 4, 10, 11, 99]
+
+RETURN:
+ _value:
+ description: A unique list of all the elements from the provided lists.
+ type: list
+ elements: any
diff --git a/ansible_collections/community/general/plugins/filter/to_days.yml b/ansible_collections/community/general/plugins/filter/to_days.yml
index 19bc8faf2..b5f6424fa 100644
--- a/ansible_collections/community/general/plugins/filter/to_days.yml
+++ b/ansible_collections/community/general/plugins/filter/to_days.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/filter/to_hours.yml b/ansible_collections/community/general/plugins/filter/to_hours.yml
index 83826a590..353fdfc31 100644
--- a/ansible_collections/community/general/plugins/filter/to_hours.yml
+++ b/ansible_collections/community/general/plugins/filter/to_hours.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/filter/to_ini.py b/ansible_collections/community/general/plugins/filter/to_ini.py
new file mode 100644
index 000000000..22ef16d72
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_ini.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Steffen Scheib <steffen@scheib.me>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+DOCUMENTATION = r'''
+ name: to_ini
+ short_description: Converts a dictionary to the INI file format
+ version_added: 8.2.0
+ author: Steffen Scheib (@sscheib)
+ description:
+ - Converts a dictionary to the INI file format.
+ options:
+ _input:
+ description: The dictionary that should be converted to the INI format.
+ type: dictionary
+ required: true
+'''
+
+EXAMPLES = r'''
+ - name: Define a dictionary
+ ansible.builtin.set_fact:
+ my_dict:
+ section_name:
+ key_name: 'key value'
+
+ another_section:
+ connection: 'ssh'
+
+ - name: Write dictionary to INI file
+ ansible.builtin.copy:
+ dest: /tmp/test.ini
+ content: '{{ my_dict | community.general.to_ini }}'
+
+ # /tmp/test.ini will look like this:
+ # [section_name]
+ # key_name = key value
+ #
+ # [another_section]
+ # connection = ssh
+'''
+
+RETURN = r'''
+ _value:
+ description: A string formatted as INI file.
+ type: string
+'''
+
+
+__metaclass__ = type
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils.six.moves import StringIO
+from ansible.module_utils.six.moves.configparser import ConfigParser
+from ansible.module_utils.common.text.converters import to_native
+
+
+class IniParser(ConfigParser):
+ ''' Implements a configparser which sets the correct optionxform '''
+
+ def __init__(self):
+ super().__init__()
+ self.optionxform = str
+
+
+def to_ini(obj):
+ ''' Read the given dict and return an INI formatted string '''
+
+ if not isinstance(obj, Mapping):
+ raise AnsibleFilterError(f'to_ini requires a dict, got {type(obj)}')
+
+ ini_parser = IniParser()
+
+ try:
+ ini_parser.read_dict(obj)
+ except Exception as ex:
+ raise AnsibleFilterError('to_ini failed to parse given dict: '
+ f'{to_native(ex)}', orig_exc=ex)
+
+ # catching empty dicts
+ if obj == dict():
+ raise AnsibleFilterError('to_ini received an empty dict. '
+ 'An empty dict cannot be converted.')
+
+ config = StringIO()
+ ini_parser.write(config)
+
+ # config.getvalue() returns two \n at the end;
+ # remove the very last character so the result ends
+ # with a single trailing newline
+ return ''.join(config.getvalue().rsplit(config.getvalue()[-1], 1))
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+
+ return {
+ 'to_ini': to_ini
+ }
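As with from_ini, the reverse conversion can be previewed outside Ansible. The dictionary below matches the EXAMPLES section above; the rest is an illustrative sketch of the same ConfigParser round trip, not the filter itself.

# Minimal sketch of the dict-to-INI conversion, run outside Ansible.
from configparser import ConfigParser
from io import StringIO

my_dict = {
    'section_name': {'key_name': 'key value'},
    'another_section': {'connection': 'ssh'},
}

parser = ConfigParser()
parser.optionxform = str  # keep key names case-sensitive, as the filter does
parser.read_dict(my_dict)

buf = StringIO()
parser.write(buf)
# ConfigParser.write() leaves a blank line after the last section;
# trim it so the output ends with a single newline, like the filter.
print(buf.getvalue().rstrip('\n') + '\n', end='')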
diff --git a/ansible_collections/community/general/plugins/filter/to_milliseconds.yml b/ansible_collections/community/general/plugins/filter/to_milliseconds.yml
index b6bb7e4be..19ed02438 100644
--- a/ansible_collections/community/general/plugins/filter/to_milliseconds.yml
+++ b/ansible_collections/community/general/plugins/filter/to_milliseconds.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/filter/to_minutes.yml b/ansible_collections/community/general/plugins/filter/to_minutes.yml
index 3b85dadc4..e8d6f763a 100644
--- a/ansible_collections/community/general/plugins/filter/to_minutes.yml
+++ b/ansible_collections/community/general/plugins/filter/to_minutes.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/filter/to_months.yml b/ansible_collections/community/general/plugins/filter/to_months.yml
index f13cee918..1f1cd661d 100644
--- a/ansible_collections/community/general/plugins/filter/to_months.yml
+++ b/ansible_collections/community/general/plugins/filter/to_months.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/filter/to_seconds.yml b/ansible_collections/community/general/plugins/filter/to_seconds.yml
index d6e6c4e46..d858e062a 100644
--- a/ansible_collections/community/general/plugins/filter/to_seconds.yml
+++ b/ansible_collections/community/general/plugins/filter/to_seconds.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/filter/to_time_unit.yml b/ansible_collections/community/general/plugins/filter/to_time_unit.yml
index c0149f0ac..bda124865 100644
--- a/ansible_collections/community/general/plugins/filter/to_time_unit.yml
+++ b/ansible_collections/community/general/plugins/filter/to_time_unit.yml
@@ -14,12 +14,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
unit:
diff --git a/ansible_collections/community/general/plugins/filter/to_weeks.yml b/ansible_collections/community/general/plugins/filter/to_weeks.yml
index 499c38627..7bf31bb65 100644
--- a/ansible_collections/community/general/plugins/filter/to_weeks.yml
+++ b/ansible_collections/community/general/plugins/filter/to_weeks.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/filter/to_years.yml b/ansible_collections/community/general/plugins/filter/to_years.yml
index 1a244a276..33c85a3ec 100644
--- a/ansible_collections/community/general/plugins/filter/to_years.yml
+++ b/ansible_collections/community/general/plugins/filter/to_years.yml
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/ansible_collections/community/general/plugins/inventory/cobbler.py b/ansible_collections/community/general/plugins/inventory/cobbler.py
index 936a409ae..8ca36f426 100644
--- a/ansible_collections/community/general/plugins/inventory/cobbler.py
+++ b/ansible_collections/community/general/plugins/inventory/cobbler.py
@@ -13,12 +13,14 @@ DOCUMENTATION = '''
version_added: 1.0.0
description:
- Get inventory hosts from the cobbler service.
- - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry."
+ - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: cobbler) entry."
+ - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler. The primary IP address is
+ taken from the management interface if one is defined, otherwise from the interface whose DNS name matches the hostname of the system, or else from the first interface found.
extends_documentation_fragment:
- inventory_cache
options:
plugin:
- description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as it's own.
+ description: The name of this plugin. It should always be set to V(community.general.cobbler) for this plugin to recognize it as its own.
required: true
choices: [ 'cobbler', 'community.general.cobbler' ]
url:
@@ -32,45 +34,73 @@ DOCUMENTATION = '''
env:
- name: COBBLER_USER
password:
- description: Cobbler authentication password
+ description: Cobbler authentication password.
required: false
env:
- name: COBBLER_PASSWORD
cache_fallback:
- description: Fallback to cached results if connection to cobbler fails
+ description: Fall back to cached results if the connection to Cobbler fails.
type: boolean
default: false
+ exclude_mgmt_classes:
+ description: Management classes to exclude from inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
exclude_profiles:
description:
- Profiles to exclude from inventory.
- - Ignored if I(include_profiles) is specified.
+ - Ignored if O(include_profiles) is specified.
type: list
default: []
elements: str
+ include_mgmt_classes:
+ description: Management classes to include in the inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
include_profiles:
description:
- Profiles to include from inventory.
- If specified, all other profiles will be excluded.
- - I(exclude_profiles) is ignored if I(include_profiles) is specified.
+ - O(exclude_profiles) is ignored if O(include_profiles) is specified.
type: list
default: []
elements: str
version_added: 4.4.0
+ inventory_hostname:
+ description:
+ - What to use for the Ansible inventory hostname.
+ - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static interface.
+ - If set to V(system), the cobbler system name is used.
+ type: str
+ choices: [ 'hostname', 'system' ]
+ default: hostname
+ version_added: 7.1.0
group_by:
- description: Keys to group hosts by
+ description: Keys to group hosts by.
type: list
elements: string
default: [ 'mgmt_classes', 'owners', 'status' ]
group:
- description: Group to place all hosts into
+ description: Group to place all hosts into.
default: cobbler
group_prefix:
- description: Prefix to apply to cobbler groups
+ description: Prefix to apply to cobbler groups.
default: cobbler_
want_facts:
- description: Toggle, if C(true) the plugin will retrieve host facts from the server
+ description: Toggle, if V(true) the plugin will retrieve host facts from the server.
type: boolean
default: true
+ want_ip_addresses:
+ description:
+ - Toggle, if V(true) the plugin will add C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionaries to the defined O(group), mapping
+ interface DNS names to IP addresses.
+ type: boolean
+ default: true
+ version_added: 7.1.0
'''
EXAMPLES = '''
@@ -85,8 +115,9 @@ import socket
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.six import iteritems
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
+from ansible.module_utils.six import text_type
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
# xmlrpc
try:
@@ -128,7 +159,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
self.token = None
if self.get_option('user') is not None:
- self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
+ self.token = self.connection.login(text_type(self.get_option('user')), text_type(self.get_option('password')))
return self.connection
def _init_cache(self):
@@ -198,9 +229,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
+ self.exclude_mgmt_classes = self.get_option('exclude_mgmt_classes')
+ self.include_mgmt_classes = self.get_option('include_mgmt_classes')
self.exclude_profiles = self.get_option('exclude_profiles')
self.include_profiles = self.get_option('include_profiles')
self.group_by = self.get_option('group_by')
+ self.inventory_hostname = self.get_option('inventory_hostname')
for profile in self._get_profiles():
if profile['parent']:
@@ -236,22 +270,34 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.inventory.add_group(self.group)
self.display.vvvv('Added site group %s\n' % self.group)
+ ip_addresses = {}
+ ipv6_addresses = {}
for host in self._get_systems():
# Get the FQDN for the host and add it to the right groups
- hostname = host['hostname'] # None
+ if self.inventory_hostname == 'system':
+ hostname = make_unsafe(host['name']) # None
+ else:
+ hostname = make_unsafe(host['hostname']) # None
interfaces = host['interfaces']
- if self._exclude_profile(host['profile']):
- self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
- continue
+ if set(host['mgmt_classes']) & set(self.include_mgmt_classes):
+ self.display.vvvv('Including host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes']))
+ else:
+ if self._exclude_profile(host['profile']):
+ self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
+ continue
+
+ if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes):
+ self.display.vvvv('Excluding host %s in mgmt_classes %s\n' % (host['name'], host['mgmt_classes']))
+ continue
# hostname is often empty for non-static IP hosts
if hostname == '':
- for (iname, ivalue) in iteritems(interfaces):
+ for iname, ivalue in interfaces.items():
if ivalue['management'] or not ivalue['static']:
this_dns_name = ivalue.get('dns_name', None)
if this_dns_name is not None and this_dns_name != "":
- hostname = this_dns_name
+ hostname = make_unsafe(this_dns_name)
self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
if hostname == '':
@@ -262,8 +308,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
# Add host to profile group
- group_name = self._add_safe_group_name(host['profile'], child=hostname)
- self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+ if host['profile'] != '':
+ group_name = self._add_safe_group_name(host['profile'], child=hostname)
+ self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+ else:
+ self.display.warning('Host %s has an empty profile\n' % (hostname))
# Add host to groups specified by group_by fields
for group_by in self.group_by:
@@ -280,8 +329,51 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.inventory.add_child(self.group, hostname)
# Add host variables
+ ip_address = None
+ ip_address_first = None
+ ipv6_address = None
+ ipv6_address_first = None
+ for iname, ivalue in interfaces.items():
+ # Set to first interface or management interface if defined or hostname matches dns_name
+ if ivalue['ip_address'] != "":
+ if ip_address_first is None:
+ ip_address_first = ivalue['ip_address']
+ if ivalue['management']:
+ ip_address = ivalue['ip_address']
+ elif ivalue['dns_name'] == hostname and ip_address is None:
+ ip_address = ivalue['ip_address']
+ if ivalue['ipv6_address'] != "":
+ if ipv6_address_first is None:
+ ipv6_address_first = ivalue['ipv6_address']
+ if ivalue['management']:
+ ipv6_address = ivalue['ipv6_address']
+ elif ivalue['dns_name'] == hostname and ipv6_address is None:
+ ipv6_address = ivalue['ipv6_address']
+
+ # Collect all interface name mappings for adding to group vars
+ if self.get_option('want_ip_addresses'):
+ if ivalue['dns_name'] != "":
+ if ivalue['ip_address'] != "":
+ ip_addresses[ivalue['dns_name']] = ivalue['ip_address']
+ if ivalue['ipv6_address'] != "":
+ ipv6_addresses[ivalue['dns_name']] = ivalue['ipv6_address']
+
+ # Add ip_address to host if defined, use first if no management or matched dns_name
+ if ip_address is None and ip_address_first is not None:
+ ip_address = ip_address_first
+ if ip_address is not None:
+ self.inventory.set_variable(hostname, 'cobbler_ipv4_address', make_unsafe(ip_address))
+ if ipv6_address is None and ipv6_address_first is not None:
+ ipv6_address = ipv6_address_first
+ if ipv6_address is not None:
+ self.inventory.set_variable(hostname, 'cobbler_ipv6_address', make_unsafe(ipv6_address))
+
if self.get_option('want_facts'):
try:
- self.inventory.set_variable(hostname, 'cobbler', host)
+ self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host))
except ValueError as e:
self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
+
+ if self.get_option('want_ip_addresses'):
+ self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses))
+ self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', make_unsafe(ipv6_addresses))
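
A minimal cobbler inventory source exercising the new address handling might look like the sketch below; the want_facts and want_ip_addresses options and the cobbler_ipv4_address/cobbler_ipv6_address variables come straight from the hunk above, while the URL and the mgmt class name are illustrative placeholders.

    # cobbler.yml -- sketch only
    plugin: community.general.cobbler
    url: https://cobbler.example.com/cobbler_api   # placeholder endpoint
    want_facts: true             # exposes the full host record as the 'cobbler' hostvar
    want_ip_addresses: true      # also fills the cobbler_ipv4_addresses/cobbler_ipv6_addresses group vars
    exclude_mgmt_classes:
      - decommissioned           # hosts carrying this mgmt class are skipped, as in the logic above
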
diff --git a/ansible_collections/community/general/plugins/inventory/gitlab_runners.py b/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
index d68b8d4e2..536f4bb1b 100644
--- a/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
+++ b/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
@@ -14,7 +14,6 @@ DOCUMENTATION = '''
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for GitLab runners.
requirements:
- - python >= 2.7
- python-gitlab > 1.8.0
extends_documentation_fragment:
- constructed
@@ -84,6 +83,7 @@ keyed_groups:
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
try:
import gitlab
@@ -106,11 +106,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
else:
runners = gl.runners.all()
for runner in runners:
- host = str(runner['id'])
+ host = make_unsafe(str(runner['id']))
ip_address = runner['ip_address']
- host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
+ host_attrs = make_unsafe(vars(gl.runners.get(runner['id']))['_attrs'])
self.inventory.add_host(host, group='gitlab_runners')
- self.inventory.set_variable(host, 'ansible_host', ip_address)
+ self.inventory.set_variable(host, 'ansible_host', make_unsafe(ip_address))
if self.get_option('verbose_output', True):
self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
diff --git a/ansible_collections/community/general/plugins/inventory/icinga2.py b/ansible_collections/community/general/plugins/inventory/icinga2.py
index 70e0f5733..6746bb8e0 100644
--- a/ansible_collections/community/general/plugins/inventory/icinga2.py
+++ b/ansible_collections/community/general/plugins/inventory/icinga2.py
@@ -58,11 +58,17 @@ DOCUMENTATION = '''
description:
- Allows the override of the inventory name based on different attributes.
- This allows for changing the way limits are used.
- - The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead.
+      - The current default, V(address), is sometimes not unique or present. We recommend using V(name) instead.
type: string
default: address
choices: ['name', 'display_name', 'address']
version_added: 4.2.0
+ group_by_hostgroups:
+ description:
+ - Uses Icinga2 hostgroups as groups.
+ type: boolean
+ default: true
+ version_added: 8.4.0
'''
EXAMPLES = r'''
@@ -72,7 +78,7 @@ url: http://localhost:5665
user: ansible
password: secure
host_filter: \"linux-servers\" in host.groups
-validate_certs: false
+validate_certs: false # only do this when connecting to localhost!
inventory_attr: name
groups:
# simple name matching
@@ -96,6 +102,7 @@ from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
class InventoryModule(BaseInventoryPlugin, Constructable):
@@ -114,6 +121,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self.ssl_verify = None
self.host_filter = None
self.inventory_attr = None
+ self.group_by_hostgroups = None
self.cache_key = None
self.use_cache = None
@@ -233,31 +241,32 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
"""Convert Icinga2 API data to JSON format for Ansible"""
groups_dict = {"_meta": {"hostvars": {}}}
for entry in json_data:
- host_attrs = entry['attrs']
+ host_attrs = make_unsafe(entry['attrs'])
if self.inventory_attr == "name":
- host_name = entry.get('name')
+ host_name = make_unsafe(entry.get('name'))
if self.inventory_attr == "address":
# When looking for address for inventory, if missing fallback to object name
if host_attrs.get('address', '') != '':
- host_name = host_attrs.get('address')
+ host_name = make_unsafe(host_attrs.get('address'))
else:
- host_name = entry.get('name')
+ host_name = make_unsafe(entry.get('name'))
if self.inventory_attr == "display_name":
host_name = host_attrs.get('display_name')
if host_attrs['state'] == 0:
host_attrs['state'] = 'on'
else:
host_attrs['state'] = 'off'
- host_groups = host_attrs.get('groups')
self.inventory.add_host(host_name)
- for group in host_groups:
- if group not in self.inventory.groups.keys():
- self.inventory.add_group(group)
- self.inventory.add_child(group, host_name)
+ if self.group_by_hostgroups:
+ host_groups = host_attrs.get('groups')
+ for group in host_groups:
+ if group not in self.inventory.groups.keys():
+ self.inventory.add_group(group)
+ self.inventory.add_child(group, host_name)
# If the address attribute is populated, override ansible_host with the value
if host_attrs.get('address') != '':
self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
- self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
+ self.inventory.set_variable(host_name, 'hostname', make_unsafe(entry.get('name')))
self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
self.inventory.set_variable(host_name, 'state',
host_attrs['state'])
@@ -277,12 +286,23 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self._read_config_data(path)
# Store the options from the YAML file
- self.icinga2_url = self.get_option('url').rstrip('/') + '/v1'
+ self.icinga2_url = self.get_option('url')
self.icinga2_user = self.get_option('user')
self.icinga2_password = self.get_option('password')
self.ssl_verify = self.get_option('validate_certs')
self.host_filter = self.get_option('host_filter')
self.inventory_attr = self.get_option('inventory_attr')
+ self.group_by_hostgroups = self.get_option('group_by_hostgroups')
+
+ if self.templar.is_template(self.icinga2_url):
+ self.icinga2_url = self.templar.template(variable=self.icinga2_url, disable_lookups=False)
+ if self.templar.is_template(self.icinga2_user):
+ self.icinga2_user = self.templar.template(variable=self.icinga2_user, disable_lookups=False)
+ if self.templar.is_template(self.icinga2_password):
+ self.icinga2_password = self.templar.template(variable=self.icinga2_password, disable_lookups=False)
+
+ self.icinga2_url = self.icinga2_url.rstrip('/') + '/v1'
+
# Not currently enabled
# self.cache_key = self.get_cache_key(path)
# self.use_cache = cache and self.get_option('cache')
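
A sketch of an icinga2 source using both behaviour changes above: templated connection options (now resolved before '/v1' is appended) and the new group_by_hostgroups toggle. The env lookup is only one example of a template; the variable names are placeholders.

    # icinga2.yml -- sketch only
    plugin: community.general.icinga2
    url: "{{ lookup('ansible.builtin.env', 'ICINGA2_URL') }}"       # template resolved first, then '/v1' appended
    user: "{{ lookup('ansible.builtin.env', 'ICINGA2_USER') }}"
    password: "{{ lookup('ansible.builtin.env', 'ICINGA2_PASSWORD') }}"
    inventory_attr: name
    group_by_hostgroups: false    # new in 8.4.0: do not create groups from Icinga2 hostgroups
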
diff --git a/ansible_collections/community/general/plugins/inventory/linode.py b/ansible_collections/community/general/plugins/inventory/linode.py
index b28cfa27b..fc79f12c5 100644
--- a/ansible_collections/community/general/plugins/inventory/linode.py
+++ b/ansible_collections/community/general/plugins/inventory/linode.py
@@ -12,7 +12,6 @@ DOCUMENTATION = r'''
- Luke Murphy (@decentral1se)
short_description: Ansible dynamic inventory plugin for Linode.
requirements:
- - python >= 2.7
- linode_api4 >= 2.0.0
description:
- Reads inventories from the Linode API v4.
@@ -123,6 +122,7 @@ compose:
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
try:
@@ -199,20 +199,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
def _add_instances_to_groups(self):
"""Add instance names to their dynamic inventory groups."""
for instance in self.instances:
- self.inventory.add_host(instance.label, group=instance.group)
+ self.inventory.add_host(make_unsafe(instance.label), group=instance.group)
def _add_hostvars_for_instances(self):
"""Add hostvars for instances in the dynamic inventory."""
ip_style = self.get_option('ip_style')
for instance in self.instances:
hostvars = instance._raw_json
+ hostname = make_unsafe(instance.label)
for hostvar_key in hostvars:
if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
continue
self.inventory.set_variable(
- instance.label,
+ hostname,
hostvar_key,
- hostvars[hostvar_key]
+ make_unsafe(hostvars[hostvar_key])
)
if ip_style == 'api':
ips = instance.ips.ipv4.public + instance.ips.ipv4.private
@@ -221,9 +222,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
for ip_type in set(ip.type for ip in ips):
self.inventory.set_variable(
- instance.label,
+ hostname,
ip_type,
- self._ip_data([ip for ip in ips if ip.type == ip_type])
+ make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type]))
)
def _ip_data(self, ip_list):
@@ -254,30 +255,44 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._add_instances_to_groups()
self._add_hostvars_for_instances()
for instance in self.instances:
- variables = self.inventory.get_host(instance.label).get_vars()
+ hostname = make_unsafe(instance.label)
+ variables = self.inventory.get_host(hostname).get_vars()
self._add_host_to_composed_groups(
self.get_option('groups'),
variables,
- instance.label,
+ hostname,
strict=strict)
self._add_host_to_keyed_groups(
self.get_option('keyed_groups'),
variables,
- instance.label,
+ hostname,
strict=strict)
self._set_composite_vars(
self.get_option('compose'),
variables,
- instance.label,
+ hostname,
strict=strict)
def verify_file(self, path):
- """Verify the Linode configuration file."""
+ """Verify the Linode configuration file.
+
+ Return true/false if the config-file is valid for this plugin
+
+ Args:
+ str(path): path to the config
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ bool(valid): is valid config file"""
+ valid = False
if super(InventoryModule, self).verify_file(path):
- endings = ('linode.yaml', 'linode.yml')
- if any((path.endswith(ending) for ending in endings)):
- return True
- return False
+ if path.endswith(("linode.yaml", "linode.yml")):
+ valid = True
+ else:
+ self.display.vvv('Inventory source not ending in "linode.yaml" or "linode.yml"')
+ return valid
def parse(self, inventory, loader, path, cache=True):
"""Dynamically parse Linode the cloud inventory."""
diff --git a/ansible_collections/community/general/plugins/inventory/lxd.py b/ansible_collections/community/general/plugins/inventory/lxd.py
index bd0a6ce00..c803f47dd 100644
--- a/ansible_collections/community/general/plugins/inventory/lxd.py
+++ b/ansible_collections/community/general/plugins/inventory/lxd.py
@@ -41,14 +41,28 @@ DOCUMENTATION = r'''
aliases: [ cert_file ]
default: $HOME/.config/lxc/client.crt
type: path
+ server_cert:
+ description:
+ - The server certificate file path.
+ type: path
+ version_added: 8.0.0
+ server_check_hostname:
+ description:
+      - This option controls whether the server's hostname is checked as part of the HTTPS connection verification.
+        This can be useful to disable if, for example, the server certificate provided (see the O(server_cert) option)
+        does not cover a name matching the one used to communicate with the server. Such a mismatch is common, as LXD
+ generates self-signed server certificates by default.
+ type: bool
+ default: true
+ version_added: 8.0.0
trust_password:
description:
- The client trusted password.
- You need to set this password on the lxd server before
running this module using the following command
C(lxc config set core.trust_password <some random password>)
- See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
- - If I(trust_password) is set, this module send a request for authentication before sending any requests.
+ See U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
+      - If O(trust_password) is set, this module sends a request for authentication before sending any requests.
type: str
state:
description: Filter the instance according to the current status.
@@ -62,7 +76,7 @@ DOCUMENTATION = r'''
version_added: 6.2.0
type_filter:
description:
- - Filter the instances by type C(virtual-machine), C(container) or C(both).
+ - Filter the instances by type V(virtual-machine), V(container) or V(both).
- The first version of the inventory only supported containers.
type: str
default: container
@@ -70,18 +84,18 @@ DOCUMENTATION = r'''
version_added: 4.2.0
prefered_instance_network_interface:
description:
- - If an instance has multiple network interfaces, select which one is the prefered as pattern.
+      - If an instance has multiple network interfaces, select which one is preferred by giving a name pattern.
- Combined with the first number that can be found e.g. 'eth' + 0.
- - The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
- The old name still works as an alias.
+ - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface)
+ in community.general 3.8.0. The old name still works as an alias.
type: str
default: eth
aliases:
- prefered_container_network_interface
prefered_instance_network_family:
description:
- - If an instance has multiple network interfaces, which one is the prefered by family.
- - Specify C(inet) for IPv4 and C(inet6) for IPv6.
+      - If an instance has multiple network interfaces, select which one is preferred by address family.
+ - Specify V(inet) for IPv4 and V(inet6) for IPv6.
type: str
default: inet
choices: [ 'inet', 'inet6' ]
@@ -161,6 +175,7 @@ from ansible.module_utils.six import raise_from
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
try:
import ipaddress
@@ -286,7 +301,7 @@ class InventoryModule(BaseInventoryPlugin):
urls = (url for url in url_list if self.validate_url(url))
for url in urls:
try:
- socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug)
+ socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug, self.server_cert, self.server_check_hostname)
return socket_connection
except LXDClientException as err:
error_storage[url] = err
@@ -359,7 +374,7 @@ class InventoryModule(BaseInventoryPlugin):
Kwargs:
None
Source:
- https://github.com/lxc/lxd/blob/master/doc/rest-api.md
+ https://documentation.ubuntu.com/lxd/en/latest/rest-api/
Raises:
None
Returns:
@@ -376,7 +391,7 @@ class InventoryModule(BaseInventoryPlugin):
def get_instance_data(self, names):
"""Create Inventory of the instance
- Iterate through the different branches of the instances and collect Informations.
+        Iterate through the different branches of the instances and collect information.
Args:
list(names): List of instance names
@@ -398,7 +413,7 @@ class InventoryModule(BaseInventoryPlugin):
def get_network_data(self, names):
"""Create Inventory of the instance
- Iterate through the different branches of the instances and collect Informations.
+        Iterate through the different branches of the instances and collect information.
Args:
list(names): List of instance names
@@ -451,12 +466,12 @@ class InventoryModule(BaseInventoryPlugin):
return network_configuration
def get_prefered_instance_network_interface(self, instance_name):
- """Helper to get the prefered interface of thr instance
+        """Helper to get the preferred interface of the instance
- Helper to get the prefered interface provide by neme pattern from 'prefered_instance_network_interface'.
+        Helper to get the preferred interface provided by the name pattern from 'prefered_instance_network_interface'.
Args:
- str(containe_name): name of instance
+ str(instance_name): name of instance
Kwargs:
None
Raises:
@@ -481,7 +496,7 @@ class InventoryModule(BaseInventoryPlugin):
Helper to get the VLAN_ID from the instance
Args:
- str(containe_name): name of instance
+ str(instance_name): name of instance
Kwargs:
None
Raises:
@@ -563,7 +578,7 @@ class InventoryModule(BaseInventoryPlugin):
else:
path[instance_name][key] = value
except KeyError as err:
- raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
+ raise AnsibleParserError("Unable to store Information: {0}".format(to_native(err)))
def extract_information_from_instance_configs(self):
"""Process configuration information
@@ -656,7 +671,7 @@ class InventoryModule(BaseInventoryPlugin):
if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces
self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
- self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
+ self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name)))
else:
self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
@@ -682,31 +697,39 @@ class InventoryModule(BaseInventoryPlugin):
if self.filter.lower() != instance_state:
continue
# add instance
+ instance_name = make_unsafe(instance_name)
self.inventory.add_host(instance_name)
- # add network informations
+ # add network information
self.build_inventory_network(instance_name)
# add os
v = self._get_data_entry('inventory/{0}/os'.format(instance_name))
if v:
- self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower())
+ self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower()))
# add release
v = self._get_data_entry('inventory/{0}/release'.format(instance_name))
if v:
- self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower())
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_release', make_unsafe(v.lower()))
# add profile
- self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry('inventory/{0}/profile'.format(instance_name))))
# add state
- self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_state', make_unsafe(instance_state))
# add type
- self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry('inventory/{0}/type'.format(instance_name))))
# add location information
if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
- self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry('inventory/{0}/location'.format(instance_name))))
# add VLAN_ID information
if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
- self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name))))
# add project
- self.inventory.set_variable(instance_name, 'ansible_lxd_project', self._get_data_entry('inventory/{0}/project'.format(instance_name)))
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry('inventory/{0}/project'.format(instance_name))))
def build_inventory_groups_location(self, group_name):
"""create group by attribute: location
@@ -979,7 +1002,7 @@ class InventoryModule(BaseInventoryPlugin):
for group_name in self.groupby:
if not group_name.isalnum():
raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
- group_type(group_name)
+ group_type(make_unsafe(group_name))
def build_inventory(self):
"""Build dynamic inventory
@@ -1078,6 +1101,8 @@ class InventoryModule(BaseInventoryPlugin):
try:
self.client_key = self.get_option('client_key')
self.client_cert = self.get_option('client_cert')
+ self.server_cert = self.get_option('server_cert')
+ self.server_check_hostname = self.get_option('server_check_hostname')
self.project = self.get_option('project')
self.debug = self.DEBUG
self.data = {} # store for inventory-data
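
The two new connection options can be combined like this (URL and certificate paths are placeholders):

    # lxd.yml -- sketch only
    plugin: community.general.lxd
    url: https://lxd.example.com:8443
    client_key: /home/me/.config/lxc/client.key
    client_cert: /home/me/.config/lxc/client.crt
    server_cert: /home/me/.config/lxc/servercerts/lxd.example.com.crt   # new in 8.0.0: pin the server certificate
    server_check_hostname: false   # new in 8.0.0: tolerate a hostname mismatch on the self-signed certificate
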
diff --git a/ansible_collections/community/general/plugins/inventory/nmap.py b/ansible_collections/community/general/plugins/inventory/nmap.py
index a03cf3e6f..3a28007a3 100644
--- a/ansible_collections/community/general/plugins/inventory/nmap.py
+++ b/ansible_collections/community/general/plugins/inventory/nmap.py
@@ -23,7 +23,7 @@ DOCUMENTATION = '''
required: true
choices: ['nmap', 'community.general.nmap']
sudo:
- description: Set to C(true) to execute a C(sudo nmap) plugin scan.
+ description: Set to V(true) to execute a C(sudo nmap) plugin scan.
version_added: 4.8.0
default: false
type: boolean
@@ -36,7 +36,7 @@ DOCUMENTATION = '''
exclude:
description:
- List of addresses to exclude.
- - For example C(10.2.2.15-25) or C(10.2.2.15,10.2.2.16).
+ - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16).
type: list
elements: string
env:
@@ -45,8 +45,8 @@ DOCUMENTATION = '''
port:
description:
- Only scan specific port or port range (C(-p)).
- - For example, you could pass C(22) for a single port, C(1-65535) for a range of ports,
- or C(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
+ - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports,
+ or V(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
type: string
version_added: 6.5.0
ports:
@@ -64,14 +64,14 @@ DOCUMENTATION = '''
udp_scan:
description:
- Scan via UDP.
- - Depending on your system you might need I(sudo=true) for this to work.
+ - Depending on your system you might need O(sudo=true) for this to work.
type: boolean
default: false
version_added: 6.1.0
icmp_timestamp:
description:
- Scan via ICMP Timestamp (C(-PP)).
- - Depending on your system you might need I(sudo=true) for this to work.
+ - Depending on your system you might need O(sudo=true) for this to work.
type: boolean
default: false
version_added: 6.1.0
@@ -81,10 +81,15 @@ DOCUMENTATION = '''
default: false
version_added: 6.5.0
dns_resolve:
- description: Whether to always (C(true)) or never (C(false)) do DNS resolution.
+ description: Whether to always (V(true)) or never (V(false)) do DNS resolution.
type: boolean
default: false
version_added: 6.1.0
+ use_arp_ping:
+ description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method.
+ type: boolean
+ default: true
+ version_added: 7.4.0
notes:
- At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False.
- 'TODO: add OS fingerprinting'
@@ -121,6 +126,7 @@ from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.process import get_bin_path
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
@@ -138,6 +144,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
strict = self.get_option('strict')
for host in hosts:
+ host = make_unsafe(host)
hostname = host['name']
self.inventory.add_host(hostname)
for var, value in host.items():
@@ -196,40 +203,43 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# setup command
cmd = [self._nmap]
- if self._options['sudo']:
+ if self.get_option('sudo'):
cmd.insert(0, 'sudo')
- if self._options['port']:
+ if self.get_option('port'):
cmd.append('-p')
- cmd.append(self._options['port'])
+ cmd.append(self.get_option('port'))
- if not self._options['ports']:
+ if not self.get_option('ports'):
cmd.append('-sP')
- if self._options['ipv4'] and not self._options['ipv6']:
+ if self.get_option('ipv4') and not self.get_option('ipv6'):
cmd.append('-4')
- elif self._options['ipv6'] and not self._options['ipv4']:
+ elif self.get_option('ipv6') and not self.get_option('ipv4'):
cmd.append('-6')
- elif not self._options['ipv6'] and not self._options['ipv4']:
+ elif not self.get_option('ipv6') and not self.get_option('ipv4'):
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
- if self._options['exclude']:
+ if self.get_option('exclude'):
cmd.append('--exclude')
- cmd.append(','.join(self._options['exclude']))
+ cmd.append(','.join(self.get_option('exclude')))
- if self._options['dns_resolve']:
+ if self.get_option('dns_resolve'):
cmd.append('-n')
- if self._options['udp_scan']:
+ if self.get_option('udp_scan'):
cmd.append('-sU')
- if self._options['icmp_timestamp']:
+ if self.get_option('icmp_timestamp'):
cmd.append('-PP')
- if self._options['open']:
+ if self.get_option('open'):
cmd.append('--open')
- cmd.append(self._options['address'])
+ if not self.get_option('use_arp_ping'):
+ cmd.append('--disable-arp-ping')
+
+ cmd.append(self.get_option('address'))
try:
# execute
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
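
The rewritten command construction above honours the new use_arp_ping option; a sketch of a source that turns it off (the address range is a placeholder):

    # nmap.yml -- sketch only
    plugin: community.general.nmap
    address: 192.168.0.0/24   # placeholder range
    port: "22"                # passed through as 'nmap -p 22'
    use_arp_ping: false       # new in 7.4.0: appends --disable-arp-ping for the slower but more reliable probe
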
diff --git a/ansible_collections/community/general/plugins/inventory/online.py b/ansible_collections/community/general/plugins/inventory/online.py
index 3fccd58d2..b3a9ecd37 100644
--- a/ansible_collections/community/general/plugins/inventory/online.py
+++ b/ansible_collections/community/general/plugins/inventory/online.py
@@ -68,6 +68,7 @@ from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.six.moves.urllib.parse import urljoin
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
class InventoryModule(BaseInventoryPlugin):
@@ -169,20 +170,20 @@ class InventoryModule(BaseInventoryPlugin):
"support"
)
for attribute in targeted_attributes:
- self.inventory.set_variable(hostname, attribute, host_infos[attribute])
+ self.inventory.set_variable(hostname, attribute, make_unsafe(host_infos[attribute]))
if self.extract_public_ipv4(host_infos=host_infos):
- self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
- self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)))
+ self.inventory.set_variable(hostname, "ansible_host", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)))
if self.extract_private_ipv4(host_infos=host_infos):
- self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos)))
if self.extract_os_name(host_infos=host_infos):
- self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "os_name", make_unsafe(self.extract_os_name(host_infos=host_infos)))
if self.extract_os_version(host_infos=host_infos):
- self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "os_version", make_unsafe(self.extract_os_name(host_infos=host_infos)))
def _filter_host(self, host_infos, hostname_preferences):
@@ -201,6 +202,8 @@ class InventoryModule(BaseInventoryPlugin):
if not hostname:
return
+ hostname = make_unsafe(hostname)
+
self.inventory.add_host(host=hostname)
self._fill_host_variables(hostname=hostname, host_infos=host_infos)
@@ -210,6 +213,8 @@ class InventoryModule(BaseInventoryPlugin):
if not group:
return
+ group = make_unsafe(group)
+
self.inventory.add_group(group=group)
self.inventory.add_host(group=group, host=hostname)
diff --git a/ansible_collections/community/general/plugins/inventory/opennebula.py b/ansible_collections/community/general/plugins/inventory/opennebula.py
index 603920edc..3babfa232 100644
--- a/ansible_collections/community/general/plugins/inventory/opennebula.py
+++ b/ansible_collections/community/general/plugins/inventory/opennebula.py
@@ -17,9 +17,9 @@ DOCUMENTATION = r'''
- constructed
description:
- Get inventory hosts from OpenNebula cloud.
- - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
+      - Uses a YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml)
to set parameter values.
- - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.
+      - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to an OpenNebula credentials file.
options:
plugin:
description: Token that ensures this is a source file for the 'opennebula' plugin.
@@ -31,7 +31,7 @@ DOCUMENTATION = r'''
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
+ - If not set then the value of the E(ONE_URL) environment variable is used.
env:
- name: ONE_URL
required: true
@@ -39,29 +39,29 @@ DOCUMENTATION = r'''
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the C(ONE_USERNAME) environment variable is used.
+ then the value of the E(ONE_USERNAME) environment variable is used.
env:
- name: ONE_USERNAME
type: string
api_password:
description:
- Password or a token of the user to login into OpenNebula RPC server.
- - If not set, the value of the C(ONE_PASSWORD) environment variable is used.
+ - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
env:
- name: ONE_PASSWORD
required: false
type: string
api_authfile:
description:
- - If both I(api_username) or I(api_password) are not set, then it will try
+      - If both O(api_username) and O(api_password) are not set, then it will try
authenticate with ONE auth file. Default path is C(~/.one/one_auth).
- - Set environment variable C(ONE_AUTH) to override this path.
+ - Set environment variable E(ONE_AUTH) to override this path.
env:
- name: ONE_AUTH
required: false
type: string
hostname:
- description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM.
+ description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM.
type: string
default: v4_first_ip
choices:
@@ -97,6 +97,7 @@ except ImportError:
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils.common.text.converters import to_native
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
from collections import namedtuple
import os
@@ -215,6 +216,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
filter_by_label = self.get_option('filter_by_label')
servers = self._retrieve_servers(filter_by_label)
for server in servers:
+ server = make_unsafe(server)
hostname = server['name']
# check for labels
if group_by_labels and server['LABELS']:
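
For reference, a sketch of a source relying on the documented fallbacks (the endpoint is a placeholder, and api_url is assumed to be the option name behind the ONE_URL description above):

    # opennebula.yml -- sketch only
    plugin: community.general.opennebula
    api_url: https://one.example.com:2633/RPC2   # falls back to ONE_URL when unset
    # api_username/api_password fall back to ONE_USERNAME/ONE_PASSWORD, or to the
    # auth file (~/.one/one_auth, overridable via ONE_AUTH / api_authfile)
    hostname: v4_first_ip                        # default: first IPv4 found on the VM
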
diff --git a/ansible_collections/community/general/plugins/inventory/proxmox.py b/ansible_collections/community/general/plugins/inventory/proxmox.py
index dc2e1febc..ed55ef1b6 100644
--- a/ansible_collections/community/general/plugins/inventory/proxmox.py
+++ b/ansible_collections/community/general/plugins/inventory/proxmox.py
@@ -25,15 +25,15 @@ DOCUMENTATION = '''
- inventory_cache
options:
plugin:
- description: The name of this plugin, it should always be set to C(community.general.proxmox) for this plugin to recognize it as it's own.
+      description: The name of this plugin, it should always be set to V(community.general.proxmox) for this plugin to recognize it as its own.
required: true
choices: ['community.general.proxmox']
type: str
url:
description:
- URL to Proxmox cluster.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the I(url).
+ - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_URL) will be used instead.
+ - Since community.general 4.7.0 you can also use templating to specify the value of the O(url).
default: 'http://localhost:8006'
type: str
env:
@@ -42,8 +42,8 @@ DOCUMENTATION = '''
user:
description:
- Proxmox authentication user.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the I(user).
+ - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_USER) will be used instead.
+ - Since community.general 4.7.0 you can also use templating to specify the value of the O(user).
required: true
type: str
env:
@@ -52,9 +52,9 @@ DOCUMENTATION = '''
password:
description:
- Proxmox authentication password.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the I(password).
- - If you do not specify a password, you must set I(token_id) and I(token_secret) instead.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_PASSWORD) will be used instead.
+ - Since community.general 4.7.0 you can also use templating to specify the value of the O(password).
+ - If you do not specify a password, you must set O(token_id) and O(token_secret) instead.
type: str
env:
- name: PROXMOX_PASSWORD
@@ -62,8 +62,8 @@ DOCUMENTATION = '''
token_id:
description:
- Proxmox authentication token ID.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_ID) will be used instead.
- - To use token authentication, you must also specify I(token_secret). If you do not specify I(token_id) and I(token_secret),
+ - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_ID) will be used instead.
+ - To use token authentication, you must also specify O(token_secret). If you do not specify O(token_id) and O(token_secret),
you must set a password instead.
- Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the users' privileges instead.
version_added: 4.8.0
@@ -73,8 +73,8 @@ DOCUMENTATION = '''
token_secret:
description:
- Proxmox authentication token secret.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_SECRET) will be used instead.
- - To use token authentication, you must also specify I(token_id). If you do not specify I(token_id) and I(token_secret),
+ - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_SECRET) will be used instead.
+ - To use token authentication, you must also specify O(token_id). If you do not specify O(token_id) and O(token_secret),
you must set a password instead.
version_added: 4.8.0
type: str
@@ -95,27 +95,32 @@ DOCUMENTATION = '''
want_facts:
description:
- Gather LXC/QEMU configuration facts.
- - When I(want_facts) is set to C(true) more details about QEMU VM status are possible, besides the running and stopped states.
+ - When O(want_facts) is set to V(true) more details about QEMU VM status are possible, besides the running and stopped states.
Currently if the VM is running and it is suspended, the status will be running and the machine will be in C(running) group,
- but its actual state will be paused. See I(qemu_extended_statuses) for how to retrieve the real status.
+ but its actual state will be paused. See O(qemu_extended_statuses) for how to retrieve the real status.
default: false
type: bool
qemu_extended_statuses:
description:
- - Requires I(want_facts) to be set to C(true) to function. This will allow you to differentiate betweend C(paused) and C(prelaunch)
+ - Requires O(want_facts) to be set to V(true) to function. This will allow you to differentiate between C(paused) and C(prelaunch)
statuses of the QEMU VMs.
- - This introduces multiple groups [prefixed with I(group_prefix)] C(prelaunch) and C(paused).
+ - This introduces multiple groups [prefixed with O(group_prefix)] C(prelaunch) and C(paused).
default: false
type: bool
version_added: 5.1.0
want_proxmox_nodes_ansible_host:
version_added: 3.0.0
description:
- - Whether to set C(ansbile_host) for proxmox nodes.
- - When set to C(true) (default), will use the first available interface. This can be different from what you expect.
- - The default of this option changed from C(true) to C(false) in community.general 6.0.0.
+ - Whether to set C(ansible_host) for proxmox nodes.
+ - When set to V(true) (default), will use the first available interface. This can be different from what you expect.
+ - The default of this option changed from V(true) to V(false) in community.general 6.0.0.
type: bool
default: false
+ exclude_nodes:
+ description: Exclude proxmox nodes and the nodes-group from the inventory output.
+ type: bool
+ default: false
+ version_added: 8.1.0
filters:
version_added: 4.6.0
description: A list of Jinja templates that allow filtering hosts.
@@ -166,7 +171,6 @@ plugin: community.general.proxmox
url: http://pve.domain.com:8006
user: ansible@pve
password: secure
-validate_certs: false
want_facts: true
keyed_groups:
# proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
@@ -187,10 +191,10 @@ want_proxmox_nodes_ansible_host: true
# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory.
# my.proxmox.yml
plugin: community.general.proxmox
-url: http://pve.domain.com:8006
+url: http://192.168.1.2:8006
user: ansible@pve
password: secure
-validate_certs: false
+validate_certs: false # only do this when you trust the network!
want_facts: true
want_proxmox_nodes_ansible_host: false
compose:
@@ -222,6 +226,7 @@ from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
@@ -330,7 +335,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._cache[self.cache_key][url] = data
- return self._cache[self.cache_key][url]
+ return make_unsafe(self._cache[self.cache_key][url])
def _get_nodes(self):
return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
@@ -565,9 +570,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
for group in default_groups:
self.inventory.add_group(self._group('all_%s' % (group)))
-
nodes_group = self._group('nodes')
- self.inventory.add_group(nodes_group)
+ if not self.exclude_nodes:
+ self.inventory.add_group(nodes_group)
want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host")
@@ -577,19 +582,24 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
for node in self._get_nodes():
if not node.get('node'):
continue
-
- self.inventory.add_host(node['node'])
- if node['type'] == 'node':
+ if not self.exclude_nodes:
+ self.inventory.add_host(node['node'])
+ if node['type'] == 'node' and not self.exclude_nodes:
self.inventory.add_child(nodes_group, node['node'])
if node['status'] == 'offline':
continue
# get node IP address
- if want_proxmox_nodes_ansible_host:
+ if want_proxmox_nodes_ansible_host and not self.exclude_nodes:
ip = self._get_node_ip(node['node'])
self.inventory.set_variable(node['node'], 'ansible_host', ip)
+ # Setting composite variables
+ if not self.exclude_nodes:
+ variables = self.inventory.get_host(node['node']).get_vars()
+ self._set_composite_vars(self.get_option('compose'), variables, node['node'], strict=self.strict)
+
# add LXC/Qemu groups for the node
for ittype in ('lxc', 'qemu'):
node_type_group = self._group('%s_%s' % (node['node'], ittype))
@@ -631,8 +641,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'):
raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.')
-
# read rest of options
+ self.exclude_nodes = self.get_option('exclude_nodes')
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
self.host_filters = self.get_option('filters')
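
A sketch combining token authentication with the new exclude_nodes option (host and token ID are placeholders):

    # my.proxmox.yml -- sketch only
    plugin: community.general.proxmox
    url: https://pve.example.com:8006
    user: ansible@pve
    token_id: ansible
    # token_secret is taken from the PROXMOX_TOKEN_SECRET environment variable when not set here
    want_facts: true
    exclude_nodes: true   # new in 8.1.0: drop the node hosts and the nodes group from the output
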
diff --git a/ansible_collections/community/general/plugins/inventory/scaleway.py b/ansible_collections/community/general/plugins/inventory/scaleway.py
index 6aacc9f66..601129f56 100644
--- a/ansible_collections/community/general/plugins/inventory/scaleway.py
+++ b/ansible_collections/community/general/plugins/inventory/scaleway.py
@@ -37,7 +37,7 @@ DOCUMENTATION = r'''
scw_profile:
description:
- The config profile to use in config file.
- - By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined.
+ - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is not defined.
type: string
version_added: 4.4.0
oauth_token:
@@ -124,6 +124,7 @@ from ansible_collections.community.general.plugins.module_utils.scaleway import
from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.six import raise_from
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
import ansible.module_utils.six.moves.urllib.parse as urllib_parse
@@ -279,7 +280,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
zone_info = SCALEWAY_LOCATION[zone]
url = _build_server_url(zone_info["api_endpoint"])
- raw_zone_hosts_infos = _fetch_information(url=url, token=token)
+ raw_zone_hosts_infos = make_unsafe(_fetch_information(url=url, token=token))
for host_infos in raw_zone_hosts_infos:
@@ -341,4 +342,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
hostname_preference = self.get_option("hostnames")
for zone in self._get_zones(config_zones):
- self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)
+ self.do_zone_inventory(zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference)
diff --git a/ansible_collections/community/general/plugins/inventory/stackpath_compute.py b/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
index 39f880e82..9a556d39e 100644
--- a/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
+++ b/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
@@ -72,6 +72,7 @@ from ansible.plugins.inventory import (
Cacheable
)
from ansible.utils.display import Display
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
display = Display()
@@ -271,7 +272,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if not cache or cache_needs_update:
results = self._query()
- self._populate(results)
+ self._populate(make_unsafe(results))
# If the cache has expired/doesn't exist or
# if refresh_inventory/flush cache is used
diff --git a/ansible_collections/community/general/plugins/inventory/virtualbox.py b/ansible_collections/community/general/plugins/inventory/virtualbox.py
index c926d8b44..8604808e1 100644
--- a/ansible_collections/community/general/plugins/inventory/virtualbox.py
+++ b/ansible_collections/community/general/plugins/inventory/virtualbox.py
@@ -62,6 +62,7 @@ from ansible.module_utils.common.text.converters import to_bytes, to_native, to_
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.process import get_bin_path
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
@@ -116,6 +117,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict)
def _populate_from_cache(self, source_data):
+ source_data = make_unsafe(source_data)
hostvars = source_data.pop('_meta', {}).get('hostvars', {})
for group in source_data:
if group == 'all':
@@ -162,7 +164,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
v = v.strip()
# found host
if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
- current_host = v
+ current_host = make_unsafe(v)
if current_host not in hostvars:
hostvars[current_host] = {}
self.inventory.add_host(current_host)
@@ -170,12 +172,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# try to get network info
netdata = self._query_vbox_data(current_host, netinfo)
if netdata:
- self.inventory.set_variable(current_host, 'ansible_host', netdata)
+ self.inventory.set_variable(current_host, 'ansible_host', make_unsafe(netdata))
# found groups
elif k == 'Groups':
for group in v.split('/'):
if group:
+ group = make_unsafe(group)
group = self.inventory.add_group(group)
self.inventory.add_child(group, current_host)
if group not in cacheable_results:
@@ -185,17 +188,17 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
else:
# found vars, accumulate in hostvars for clean inventory set
- pref_k = 'vbox_' + k.strip().replace(' ', '_')
+ pref_k = make_unsafe('vbox_' + k.strip().replace(' ', '_'))
leading_spaces = len(k) - len(k.lstrip(' '))
if 0 < leading_spaces <= 2:
if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict):
hostvars[current_host][prevkey] = {}
- hostvars[current_host][prevkey][pref_k] = v
+ hostvars[current_host][prevkey][pref_k] = make_unsafe(v)
elif leading_spaces > 2:
continue
else:
if v != '':
- hostvars[current_host][pref_k] = v
+ hostvars[current_host][pref_k] = make_unsafe(v)
if self._ungrouped_host(current_host, cacheable_results):
if 'ungrouped' not in cacheable_results:
cacheable_results['ungrouped'] = {'hosts': []}
diff --git a/ansible_collections/community/general/plugins/inventory/xen_orchestra.py b/ansible_collections/community/general/plugins/inventory/xen_orchestra.py
index ddbdd9bb0..96dd99770 100644
--- a/ansible_collections/community/general/plugins/inventory/xen_orchestra.py
+++ b/ansible_collections/community/general/plugins/inventory/xen_orchestra.py
@@ -23,21 +23,21 @@ DOCUMENTATION = '''
- inventory_cache
options:
plugin:
- description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own.
+ description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to recognize it as its own.
required: true
choices: ['community.general.xen_orchestra']
type: str
api_host:
description:
- API host to XOA API.
- - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) will be used instead.
type: str
env:
- name: ANSIBLE_XO_HOST
user:
description:
- Xen Orchestra user.
- - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) will be used instead.
required: true
type: str
env:
@@ -45,7 +45,7 @@ DOCUMENTATION = '''
password:
description:
- Xen Orchestra password.
- - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) will be used instead.
required: true
type: str
env:
@@ -82,6 +82,7 @@ from time import sleep
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.unsafe_proxy import wrap_var as make_unsafe
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
@@ -347,4 +348,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self.protocol = 'ws'
objects = self._get_objects()
- self._populate(objects)
+ self._populate(make_unsafe(objects))
diff --git a/ansible_collections/community/general/plugins/lookup/bitwarden.py b/ansible_collections/community/general/plugins/lookup/bitwarden.py
index 27de1afe6..2cb2d19a1 100644
--- a/ansible_collections/community/general/plugins/lookup/bitwarden.py
+++ b/ansible_collections/community/general/plugins/lookup/bitwarden.py
@@ -13,7 +13,7 @@ DOCUMENTATION = """
- bw (command line utility)
- be logged into bitwarden
- bitwarden vault unlocked
- - C(BW_SESSION) environment variable set
+ - E(BW_SESSION) environment variable set
short_description: Retrieve secrets from Bitwarden
version_added: 5.4.0
description:
@@ -25,7 +25,11 @@ DOCUMENTATION = """
type: list
elements: str
search:
- description: Field to retrieve, for example C(name) or C(id).
+ description:
+ - Field to retrieve, for example V(name) or V(id).
+ - If set to V(id), only zero or one element can be returned.
+ Use the Jinja C(first) filter to get the only list element.
+      - When O(collection_id) is set, this field can be undefined to retrieve all records of the collection.
type: str
default: name
version_added: 5.7.0
@@ -36,40 +40,57 @@ DOCUMENTATION = """
description: Collection ID to filter results by collection. Leave unset to skip filtering.
type: str
version_added: 6.3.0
+ bw_session:
+ description: Pass session key instead of reading from env.
+ type: str
+ version_added: 8.4.0
"""
EXAMPLES = """
-- name: "Get 'password' from Bitwarden record named 'a_test'"
+- name: "Get 'password' from all Bitwarden records named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='password') }}
-- name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
+- name: "Get 'password' from Bitwarden record with ID 'bafba515-af11-47e6-abe3-af1200cd18b2'"
ansible.builtin.debug:
msg: >-
- {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
+ {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') | first }}
-- name: "Get 'password' from Bitwarden record named 'a_test' from collection"
+- name: "Get 'password' from all Bitwarden records named 'a_test' from collection"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
-- name: "Get full Bitwarden record named 'a_test'"
+- name: "Get list of all full Bitwarden records named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test') }}
-- name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
+- name: "Get custom field 'api_key' from all Bitwarden records named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
+
+- name: "Get 'password' from all Bitwarden records named 'a_test', using given session key"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', field='password', bw_session='bXZ9B5TXi6...') }}
+
+- name: "Get all Bitwarden records from collection"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
"""
RETURN = """
_raw:
- description: List of requested field or JSON object of list of matches.
+ description:
+ - A one-element list that contains a list of requested fields or JSON objects of matches.
+ - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true),
+ this always gets reduced to a list of field values or JSON objects.
type: list
- elements: raw
+ elements: list
"""
from subprocess import Popen, PIPE
@@ -88,76 +109,120 @@ class Bitwarden(object):
def __init__(self, path='bw'):
self._cli_path = path
+ self._session = None
@property
def cli_path(self):
return self._cli_path
@property
+ def session(self):
+ return self._session
+
+ @session.setter
+ def session(self, value):
+ self._session = value
+
+ @property
def unlocked(self):
out, err = self._run(['status'], stdin="")
decoded = AnsibleJSONDecoder().raw_decode(out)[0]
return decoded['status'] == 'unlocked'
def _run(self, args, stdin=None, expected_rc=0):
+ if self.session:
+ args += ['--session', self.session]
+
p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate(to_bytes(stdin))
rc = p.wait()
if rc != expected_rc:
+ if len(args) > 2 and args[0] == 'get' and args[1] == 'item' and b'Not found.' in err:
+ return 'null', ''
raise BitwardenException(err)
return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
- def _get_matches(self, search_value, search_field, collection_id):
+ def _get_matches(self, search_value, search_field, collection_id=None):
"""Return matching records whose search_field is equal to key.
"""
# Prepare set of params for Bitwarden CLI
- params = ['list', 'items', '--search', search_value]
+ if search_value:
+ if search_field == 'id':
+ params = ['get', 'item', search_value]
+ else:
+ params = ['list', 'items', '--search', search_value]
+ if collection_id:
+ params.extend(['--collectionid', collection_id])
+ else:
+ if not collection_id:
+ raise AnsibleError("search_value is required if collection_id is not set.")
- if collection_id:
- params.extend(['--collectionid', collection_id])
+ params = ['list', 'items', '--collectionid', collection_id]
out, err = self._run(params)
# This includes things that matched in different fields.
initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
+ if search_field == 'id' or not search_value:
+ if initial_matches is None:
+ initial_matches = []
+ else:
+ initial_matches = [initial_matches]
+
# Filter to only include results from the right field.
return [item for item in initial_matches if item[search_field] == search_value]
- def get_field(self, field, search_value, search_field="name", collection_id=None):
+ def get_field(self, field, search_value=None, search_field="name", collection_id=None):
"""Return a list of the specified field for records whose search_field match search_value
and filtered by collection if collection has been provided.
If field is None, return the whole record for each match.
"""
matches = self._get_matches(search_value, search_field, collection_id)
-
- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
- return [match['login'][field] for match in matches]
- elif not field:
+ if not field:
return matches
- else:
- custom_field_matches = []
- for match in matches:
+ field_matches = []
+ for match in matches:
+ # if there are no custom fields, then `match` has no key 'fields'
+ if 'fields' in match:
+ custom_field_found = False
for custom_field in match['fields']:
- if custom_field['name'] == field:
- custom_field_matches.append(custom_field['value'])
- if matches and not custom_field_matches:
- raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
- return custom_field_matches
+ if field == custom_field['name']:
+ field_matches.append(custom_field['value'])
+ custom_field_found = True
+ break
+ if custom_field_found:
+ continue
+ if 'login' in match and field in match['login']:
+ field_matches.append(match['login'][field])
+ continue
+ if field in match:
+ field_matches.append(match[field])
+ continue
+
+ if matches and not field_matches:
+ raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
+
+ return field_matches
class LookupModule(LookupBase):
- def run(self, terms, variables=None, **kwargs):
+ def run(self, terms=None, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
field = self.get_option('field')
search_field = self.get_option('search')
collection_id = self.get_option('collection_id')
+ _bitwarden.session = self.get_option('bw_session')
+
if not _bitwarden.unlocked:
raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
+ if not terms:
+ return [_bitwarden.get_field(field, None, search_field, collection_id)]
+
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
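
A short usage sketch of the options wired in above (the collection ID and the bw_session_key variable are hypothetical placeholders): passing bw_session reuses an existing unlocked session, and omitting the search term while supplying collection_id lists every item in that collection.

- name: List all items in a collection, reusing an existing session key
  ansible.builtin.debug:
    msg: >-
      {{ lookup('community.general.bitwarden', collection_id='bafba515-af11-47e6-abe3-af1200cb18fb', bw_session=bw_session_key) }}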
diff --git a/ansible_collections/community/general/plugins/lookup/bitwarden_secrets_manager.py b/ansible_collections/community/general/plugins/lookup/bitwarden_secrets_manager.py
new file mode 100644
index 000000000..2d6706bee
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/bitwarden_secrets_manager.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023, jantari (https://github.com/jantari)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: bitwarden_secrets_manager
+ author:
+ - jantari (@jantari)
+ requirements:
+ - bws (command line utility)
+ short_description: Retrieve secrets from Bitwarden Secrets Manager
+ version_added: 7.2.0
+ description:
+ - Retrieve secrets from Bitwarden Secrets Manager.
+ options:
+ _terms:
+ description: Secret ID(s) to fetch values for.
+ required: true
+ type: list
+ elements: str
+ bws_access_token:
+ description: The BWS access token to use for this lookup.
+ env:
+ - name: BWS_ACCESS_TOKEN
+ required: true
+ type: str
+"""
+
+EXAMPLES = """
+- name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972") }}
+
+- name: Get a secret passing an explicit access token for authentication
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ lookup(
+ "community.general.bitwarden_secrets_manager",
+ "2bc23e48-4932-40de-a047-5524b7ddc972",
+ bws_access_token="9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg=="
+ )
+ }}
+
+- name: Get two different secrets each using a different access token for authentication
+ ansible.builtin.debug:
+ msg:
+ - '{{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972", bws_access_token=token1) }}'
+ - '{{ lookup("community.general.bitwarden_secrets_manager", "9d89af4c-eb5d-41f5-bb0f-4ae81215c768", bws_access_token=token2) }}'
+ vars:
+ token1: "9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg=="
+ token2: "1.69b72797-6ea9-4687-a11e-848e41a30ae6.YW5zaWJsZSBpcyBncmVhdD8K:YW5zaWJsZSBpcyBncmVhdAo="
+
+- name: Get just the value of a secret
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }}
+"""
+
+RETURN = """
+ _raw:
+ description: List containing one or more secrets.
+ type: list
+ elements: dict
+"""
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils.common.text.converters import to_text
+from ansible.parsing.ajson import AnsibleJSONDecoder
+from ansible.plugins.lookup import LookupBase
+
+
+class BitwardenSecretsManagerException(AnsibleLookupError):
+ pass
+
+
+class BitwardenSecretsManager(object):
+ def __init__(self, path='bws'):
+ self._cli_path = path
+
+ @property
+ def cli_path(self):
+ return self._cli_path
+
+ def _run(self, args, stdin=None):
+ p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(stdin)
+ rc = p.wait()
+ return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc
+
+ def get_secret(self, secret_id, bws_access_token):
+ """Get and return the secret with the given secret_id.
+ """
+
+ # Prepare set of params for Bitwarden Secrets Manager CLI
+ # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it.
+ params = [
+ '--color', 'no',
+ '--access-token', bws_access_token,
+ 'get', 'secret', secret_id
+ ]
+
+ out, err, rc = self._run(params)
+ if rc != 0:
+ raise BitwardenSecretsManagerException(to_text(err))
+
+ return AnsibleJSONDecoder().raw_decode(out)[0]
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+ bws_access_token = self.get_option('bws_access_token')
+
+ return [_bitwarden_secrets_manager.get_secret(term, bws_access_token) for term in terms]
+
+
+_bitwarden_secrets_manager = BitwardenSecretsManager()
diff --git a/ansible_collections/community/general/plugins/lookup/collection_version.py b/ansible_collections/community/general/plugins/lookup/collection_version.py
index 4d25585b8..33316fc2b 100644
--- a/ansible_collections/community/general/plugins/lookup/collection_version.py
+++ b/ansible_collections/community/general/plugins/lookup/collection_version.py
@@ -13,22 +13,22 @@ short_description: Retrieves the version of an installed collection
description:
- This lookup allows to query the version of an installed collection, and to determine whether a
collection is installed at all.
- - By default it returns C(none) for non-existing collections and C(*) for collections without a
+ - By default it returns V(none) for non-existing collections and V(*) for collections without a
version number. The latter should only happen in development environments, or when installing
a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted
- by providing other values with I(result_not_found) and I(result_no_version).
+ by providing other values with O(result_not_found) and O(result_no_version).
options:
_terms:
description:
- The collections to look for.
- - For example C(community.general).
+ - For example V(community.general).
type: list
elements: str
required: true
result_not_found:
description:
- The value to return when the collection could not be found.
- - By default, C(none) is returned.
+ - By default, V(none) is returned.
type: string
default: ~
result_no_version:
@@ -36,7 +36,7 @@ options:
- The value to return when the collection has no version number.
- This can happen for collections installed from git which do not have a version number
in C(galaxy.yml).
- - By default, C(*) is returned.
+ - By default, V(*) is returned.
type: string
default: '*'
"""
@@ -51,11 +51,11 @@ RETURN = """
_raw:
description:
- The version number of the collections listed as input.
- - If a collection can not be found, it will return the value provided in I(result_not_found).
- By default, this is C(none).
+ - If a collection can not be found, it will return the value provided in O(result_not_found).
+ By default, this is V(none).
- If a collection can be found, but the version not identified, it will return the value provided in
- I(result_no_version). By default, this is C(*). This can happen for collections installed
- from git which do not have a version number in C(galaxy.yml).
+ O(result_no_version). By default, this is V(*). This can happen for collections installed
+ from git which do not have a version number in V(galaxy.yml).
type: list
elements: str
"""
@@ -98,15 +98,10 @@ def load_collection_meta(collection_pkg, no_version='*'):
if os.path.exists(manifest_path):
return load_collection_meta_manifest(manifest_path)
- # Try to load galaxy.y(a)ml
+ # Try to load galaxy.yml
galaxy_path = os.path.join(path, 'galaxy.yml')
- galaxy_alt_path = os.path.join(path, 'galaxy.yaml')
- # galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed
- # in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for
- # ansible-core 2.12.
- for path in (galaxy_path, galaxy_alt_path):
- if os.path.exists(path):
- return load_collection_meta_galaxy(path, no_version=no_version)
+ if os.path.exists(galaxy_path):
+ return load_collection_meta_galaxy(galaxy_path, no_version=no_version)
return {}
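
For reference, a minimal invocation of this lookup; the collection name mirrors the example given in the option documentation above.

- name: Report the installed version of community.general
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.collection_version', 'community.general') }}"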
diff --git a/ansible_collections/community/general/plugins/lookup/consul_kv.py b/ansible_collections/community/general/plugins/lookup/consul_kv.py
index f17f1b269..f8aadadc1 100644
--- a/ansible_collections/community/general/plugins/lookup/consul_kv.py
+++ b/ansible_collections/community/general/plugins/lookup/consul_kv.py
@@ -38,23 +38,20 @@ DOCUMENTATION = '''
default: localhost
description:
- The target to connect to, must be a resolvable address.
- Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
- - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
- env:
- - name: ANSIBLE_CONSUL_URL
+ - Will be determined from E(ANSIBLE_CONSUL_URL) if that is set.
ini:
- section: lookup_consul
key: host
port:
description:
- The port of the target host to connect to.
- - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there.
default: 8500
scheme:
default: http
description:
- Whether to use http or https.
- - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there.
validate_certs:
default: true
description: Whether to verify the ssl connection or not.
@@ -71,7 +68,9 @@ DOCUMENTATION = '''
- section: lookup_consul
key: client_cert
url:
- description: "The target to connect to, should look like this: C(https://my.consul.server:8500)."
+ description:
+ - The target to connect to.
+ - "Should look like this: V(https://my.consul.server:8500)."
type: str
version_added: 1.0.0
env:
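
A hedged usage sketch of the url option documented above; the key path is a placeholder.

- name: Fetch a key from Consul using an explicit URL
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.consul_kv', 'ansible/config/some_key', url='https://my.consul.server:8500') }}"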
diff --git a/ansible_collections/community/general/plugins/lookup/dependent.py b/ansible_collections/community/general/plugins/lookup/dependent.py
index 54714344e..31634e6e6 100644
--- a/ansible_collections/community/general/plugins/lookup/dependent.py
+++ b/ansible_collections/community/general/plugins/lookup/dependent.py
@@ -22,7 +22,7 @@ options:
The name is the index that is used in the result object. The value is iterated over as described below.
- If the value is a list, it is simply iterated over.
- If the value is a dictionary, it is iterated over and returned as if they would be processed by the
- R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter).
+ P(ansible.builtin.dict2items#filter) filter.
- If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen
elements with C(item.<index_name>). The result must be a list or a dictionary.
type: list
diff --git a/ansible_collections/community/general/plugins/lookup/dig.py b/ansible_collections/community/general/plugins/lookup/dig.py
index fa915220b..5be57cec7 100644
--- a/ansible_collections/community/general/plugins/lookup/dig.py
+++ b/ansible_collections/community/general/plugins/lookup/dig.py
@@ -21,7 +21,7 @@ DOCUMENTATION = '''
- In addition to (default) A record, it is also possible to specify a different record type that should be queried.
This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
- If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
- In such cases you may want to pass option I(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
+ In such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
which will result in the record values being returned as a list over which you can iterate later on.
- By default, the lookup will rely on system-wide configured DNS servers for performing the query.
It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
@@ -34,8 +34,8 @@ DOCUMENTATION = '''
qtype:
description:
- Record type to query.
- - C(DLV) has been removed in community.general 6.0.0.
- - C(CAA) has been added in community.general 6.3.0.
+ - V(DLV) has been removed in community.general 6.0.0.
+ - V(CAA) has been added in community.general 6.3.0.
type: str
default: 'A'
choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
@@ -51,17 +51,17 @@ DOCUMENTATION = '''
fail_on_error:
description:
- Abort execution on lookup errors.
- - The default for this option will likely change to C(true) in the future.
- The current default, C(false), is used for backwards compatibility, and will result in empty strings
- or the string C(NXDOMAIN) in the result in case of errors.
+ - The default for this option will likely change to V(true) in the future.
+ The current default, V(false), is used for backwards compatibility, and will result in empty strings
+ or the string V(NXDOMAIN) in the result in case of errors.
default: false
type: bool
version_added: 5.4.0
real_empty:
description:
- - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN).
- - The default for this option will likely change to C(true) in the future.
- - This option will be forced to C(true) if multiple domains to be queried are specified.
+ - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
+ - The default for this option will likely change to V(true) in the future.
+ - This option will be forced to V(true) if multiple domains to be queried are specified.
default: false
type: bool
version_added: 6.0.0
@@ -70,6 +70,11 @@ DOCUMENTATION = '''
- "Class."
type: str
default: 'IN'
+ tcp:
+ description: Use TCP to lookup DNS records.
+      description: Use TCP to look up DNS records.
+ type: bool
+ version_added: 7.5.0
notes:
- ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary.
- While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
@@ -329,6 +334,7 @@ class LookupModule(LookupBase):
flat = self.get_option('flat')
fail_on_error = self.get_option('fail_on_error')
real_empty = self.get_option('real_empty')
+ tcp = self.get_option('tcp')
try:
rdclass = dns.rdataclass.from_text(self.get_option('class'))
except Exception as e:
@@ -375,6 +381,8 @@ class LookupModule(LookupBase):
fail_on_error = boolean(arg)
elif opt == 'real_empty':
real_empty = boolean(arg)
+ elif opt == 'tcp':
+ tcp = boolean(arg)
continue
@@ -408,7 +416,7 @@ class LookupModule(LookupBase):
for domain in domains:
try:
- answers = myres.query(domain, qtype, rdclass=rdclass)
+ answers = myres.query(domain, qtype, rdclass=rdclass, tcp=tcp)
for rdata in answers:
s = rdata.to_text()
if qtype.upper() == 'TXT':
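
A minimal sketch of the new tcp option added above; the domain is illustrative.

- name: Resolve an A record over TCP instead of UDP
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.dig', 'example.com', tcp=true) }}"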
diff --git a/ansible_collections/community/general/plugins/lookup/dnstxt.py b/ansible_collections/community/general/plugins/lookup/dnstxt.py
index 55067dc82..1ce511b84 100644
--- a/ansible_collections/community/general/plugins/lookup/dnstxt.py
+++ b/ansible_collections/community/general/plugins/lookup/dnstxt.py
@@ -22,8 +22,8 @@ DOCUMENTATION = '''
elements: string
real_empty:
description:
- - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN).
- - The default for this option will likely change to C(true) in the future.
+ - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
+ - The default for this option will likely change to V(true) in the future.
default: false
type: bool
version_added: 6.0.0
diff --git a/ansible_collections/community/general/plugins/lookup/dsv.py b/ansible_collections/community/general/plugins/lookup/dsv.py
index 91a9d9921..2dbb7db3e 100644
--- a/ansible_collections/community/general/plugins/lookup/dsv.py
+++ b/ansible_collections/community/general/plugins/lookup/dsv.py
@@ -13,15 +13,15 @@ short_description: Get secrets from Thycotic DevOps Secrets Vault
version_added: 1.0.0
description:
- Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a
- DSV I(tenant) using a I(client_id) and I(client_secret).
+ DSV O(tenant) using a O(client_id) and O(client_secret).
requirements:
- python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
options:
_terms:
- description: The path to the secret, e.g. C(/staging/servers/web1).
+ description: The path to the secret, for example V(/staging/servers/web1).
required: true
tenant:
- description: The first format parameter in the default I(url_template).
+ description: The first format parameter in the default O(url_template).
env:
- name: DSV_TENANT
ini:
@@ -31,7 +31,7 @@ options:
tld:
default: com
description: The top-level domain of the tenant; the second format
- parameter in the default I(url_template).
+ parameter in the default O(url_template).
env:
- name: DSV_TLD
ini:
@@ -47,7 +47,7 @@ options:
key: client_id
required: true
client_secret:
- description: The client secret associated with the specific I(client_id).
+ description: The client secret associated with the specific O(client_id).
env:
- name: DSV_CLIENT_SECRET
ini:
diff --git a/ansible_collections/community/general/plugins/lookup/etcd.py b/ansible_collections/community/general/plugins/lookup/etcd.py
index d6a12293e..5135e7487 100644
--- a/ansible_collections/community/general/plugins/lookup/etcd.py
+++ b/ansible_collections/community/general/plugins/lookup/etcd.py
@@ -24,7 +24,7 @@ DOCUMENTATION = '''
required: true
url:
description:
- - Environment variable with the url for the etcd server
+ - Environment variable with the URL for the etcd server
default: 'http://127.0.0.1:4001'
env:
- name: ANSIBLE_ETCD_URL
@@ -39,6 +39,10 @@ DOCUMENTATION = '''
- toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs.
default: true
type: boolean
+ seealso:
+ - module: community.general.etcd3
+ - plugin: community.general.etcd3
+ plugin_type: lookup
'''
EXAMPLES = '''
@@ -50,7 +54,7 @@ EXAMPLES = '''
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}"
-- name: "since Ansible 2.5 you can set server options inline"
+- name: "you can set server options inline"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
'''
@@ -58,7 +62,7 @@ EXAMPLES = '''
RETURN = '''
_raw:
description:
- - list of values associated with input keys
+ - List of values associated with input keys.
type: list
elements: string
'''
diff --git a/ansible_collections/community/general/plugins/lookup/etcd3.py b/ansible_collections/community/general/plugins/lookup/etcd3.py
index 7f0a0cf90..0bda006e3 100644
--- a/ansible_collections/community/general/plugins/lookup/etcd3.py
+++ b/ansible_collections/community/general/plugins/lookup/etcd3.py
@@ -32,10 +32,10 @@ DOCUMENTATION = '''
default: false
endpoints:
description:
- - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable.
- Specify the etcd3 connection with and URL form eg. C(https://hostname:2379) or C(<host>:<port>) form.
- - The C(host) part is overwritten by I(host) option, if defined.
- - The C(port) part is overwritten by I(port) option, if defined.
+ - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable.
+        Specify the etcd3 connection in URL form, for example V(https://hostname:2379) or V(<host>:<port>).
+ - The V(host) part is overwritten by O(host) option, if defined.
+ - The V(port) part is overwritten by O(port) option, if defined.
env:
- name: ETCDCTL_ENDPOINTS
default: '127.0.0.1:2379'
@@ -43,12 +43,12 @@ DOCUMENTATION = '''
host:
description:
- etcd3 listening client host.
- - Takes precedence over I(endpoints).
+ - Takes precedence over O(endpoints).
type: str
port:
description:
- etcd3 listening client port.
- - Takes precedence over I(endpoints).
+ - Takes precedence over O(endpoints).
type: int
ca_cert:
description:
@@ -89,13 +89,13 @@ DOCUMENTATION = '''
type: str
notes:
- - I(host) and I(port) options take precedence over (endpoints) option.
- - The recommended way to connect to etcd3 server is using C(ETCDCTL_ENDPOINT)
- environment variable and keep I(endpoints), I(host), and I(port) unused.
+    - O(host) and O(port) options take precedence over the O(endpoints) option.
+ - The recommended way to connect to etcd3 server is using E(ETCDCTL_ENDPOINT)
+ environment variable and keep O(endpoints), O(host), and O(port) unused.
seealso:
- module: community.general.etcd3
- - ref: ansible_collections.community.general.etcd_lookup
- description: The etcd v2 lookup.
+ - plugin: community.general.etcd
+ plugin_type: lookup
requirements:
- "etcd3 >= 0.10"
diff --git a/ansible_collections/community/general/plugins/lookup/filetree.py b/ansible_collections/community/general/plugins/lookup/filetree.py
index f12cc4519..2131de99a 100644
--- a/ansible_collections/community/general/plugins/lookup/filetree.py
+++ b/ansible_collections/community/general/plugins/lookup/filetree.py
@@ -65,7 +65,7 @@ RETURN = r"""
src:
description:
- Full path to file.
- - Not returned when I(item.state) is set to C(directory).
+ - Not returned when RV(_raw[].state) is set to V(directory).
type: path
root:
description: Allows filtering by original location.
diff --git a/ansible_collections/community/general/plugins/lookup/flattened.py b/ansible_collections/community/general/plugins/lookup/flattened.py
index e955b6478..0071417a0 100644
--- a/ansible_collections/community/general/plugins/lookup/flattened.py
+++ b/ansible_collections/community/general/plugins/lookup/flattened.py
@@ -19,7 +19,7 @@ DOCUMENTATION = '''
elements: raw
required: true
notes:
- - Unlike the R(items lookup,ansible_collections.ansible.builtin.items_lookup) which only flattens 1 level,
+ - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level,
this plugin will continue to flatten until it cannot find lists anymore.
- Aka highlander plugin, there can only be one (list).
'''
diff --git a/ansible_collections/community/general/plugins/lookup/github_app_access_token.py b/ansible_collections/community/general/plugins/lookup/github_app_access_token.py
new file mode 100644
index 000000000..5cd99b81c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/github_app_access_token.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023, Poh Wei Sheng <weisheng-p@hotmail.sg>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: github_app_access_token
+ author:
+ - Poh Wei Sheng (@weisheng-p)
+ short_description: Obtain short-lived Github App Access tokens
+ version_added: '8.2.0'
+ requirements:
+ - jwt (https://github.com/GehirnInc/python-jwt)
+ description:
+ - This generates a Github access token that can be used with a C(git) command, if you use a Github App.
+ options:
+ key_path:
+ description:
+ - Path to your private key.
+ required: true
+ type: path
+ app_id:
+ description:
+ - Your GitHub App ID, you can find this in the Settings page.
+ required: true
+ type: str
+ installation_id:
+ description:
+ - The installation ID that contains the git repository you would like access to.
+ - As of 2023-12-24, this can be found via Settings page > Integrations > Application. The last part of the URL in the
+ configure button is the installation ID.
+ - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID.
+ required: true
+ type: str
+ token_expiry:
+ description:
+ - How long the token should last for in seconds.
+ default: 600
+ type: int
+'''
+
+EXAMPLES = '''
+- name: Get access token to be used for git checkout with app_id=123456, installation_id=64209
+ ansible.builtin.git:
+ repo: >-
+ https://x-access-token:{{ github_token }}@github.com/hidden_user/super-secret-repo.git
+ dest: /srv/checkout
+ vars:
+ github_token: >-
+      {{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key',
+      app_id='123456', installation_id='64209') }}
+'''
+
+RETURN = '''
+ _raw:
+ description: A one-element list containing your GitHub access token.
+ type: list
+ elements: str
+'''
+
+
+try:
+ from jwt import JWT, jwk_from_pem
+ HAS_JWT = True
+except ImportError:
+ HAS_JWT = False
+
+import time
+import json
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+if HAS_JWT:
+ jwt_instance = JWT()
+else:
+ jwk_from_pem = None
+ jwt_instance = None
+
+display = Display()
+
+
+def read_key(path):
+ try:
+ with open(path, 'rb') as pem_file:
+ return jwk_from_pem(pem_file.read())
+ except Exception as e:
+ raise AnsibleError("Error while parsing key file: {0}".format(e))
+
+
+def encode_jwt(app_id, jwk, exp=600):
+ now = int(time.time())
+ payload = {
+ 'iat': now,
+ 'exp': now + exp,
+ 'iss': app_id,
+ }
+ try:
+ return jwt_instance.encode(payload, jwk, alg='RS256')
+ except Exception as e:
+ raise AnsibleError("Error while encoding jwt: {0}".format(e))
+
+
+def post_request(generated_jwt, installation_id):
+ github_api_url = f'https://api.github.com/app/installations/{installation_id}/access_tokens'
+ headers = {
+ "Authorization": f'Bearer {generated_jwt}',
+ "Accept": "application/vnd.github.v3+json",
+ }
+ try:
+ response = open_url(github_api_url, headers=headers, method='POST')
+ except HTTPError as e:
+ try:
+ error_body = json.loads(e.read().decode())
+ display.vvv("Error returned: {0}".format(error_body))
+ except Exception:
+ error_body = {}
+ if e.code == 404:
+            raise AnsibleError("GitHub returned an error. Please confirm your installation_id value is valid")
+ elif e.code == 401:
+            raise AnsibleError("GitHub returned an error. Please confirm your private key is valid")
+ raise AnsibleError("Unexpected data returned: {0} -- {1}".format(e, error_body))
+ response_body = response.read()
+ try:
+ json_data = json.loads(response_body.decode('utf-8'))
+ except json.decoder.JSONDecodeError as e:
+        raise AnsibleError("Error while decoding JSON response from GitHub: {0}".format(e))
+ return json_data.get('token')
+
+
+def get_token(key_path, app_id, installation_id, expiry=600):
+ jwk = read_key(key_path)
+ generated_jwt = encode_jwt(app_id, jwk, exp=expiry)
+ return post_request(generated_jwt, installation_id)
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_JWT:
+ raise AnsibleError('Python jwt library is required. '
+ 'Please install using "pip install jwt"')
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ t = get_token(
+ self.get_option('key_path'),
+ self.get_option('app_id'),
+ self.get_option('installation_id'),
+ self.get_option('token_expiry'),
+ )
+
+ return [t]
diff --git a/ansible_collections/community/general/plugins/lookup/lmdb_kv.py b/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
index 0950249dc..a37cff956 100644
--- a/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
+++ b/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
@@ -15,7 +15,7 @@ DOCUMENTATION = '''
description:
- This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
requirements:
- - lmdb (python library https://lmdb.readthedocs.io/en/release/)
+ - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/))
options:
_terms:
description: List of keys to query.
diff --git a/ansible_collections/community/general/plugins/lookup/merge_variables.py b/ansible_collections/community/general/plugins/lookup/merge_variables.py
index cd5fa5b7d..4fc33014c 100644
--- a/ansible_collections/community/general/plugins/lookup/merge_variables.py
+++ b/ansible_collections/community/general/plugins/lookup/merge_variables.py
@@ -10,16 +10,17 @@ DOCUMENTATION = """
author:
- Roy Lenferink (@rlenferink)
- Mark Ettema (@m-a-r-k-e)
+ - Alexander Petrenz (@alpex8)
name: merge_variables
short_description: merge variables with a certain suffix
description:
- This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or
- regular expressions, optionally.
+ regular expressions, optionally.
version_added: 6.5.0
options:
_terms:
description:
- - Depending on the value of I(pattern_type), this is a list of prefixes, suffixes, or regular expressions
+ - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions
that will be used to match all variables that should be merged.
required: true
type: list
@@ -45,11 +46,11 @@ DOCUMENTATION = """
override:
description:
- Return an error, print a warning or ignore it when a key will be overwritten.
- - The default behavior C(error) makes the plugin fail when a key would be overwritten.
- - When C(warn) and C(ignore) are used, note that it is important to know that the variables
+ - The default behavior V(error) makes the plugin fail when a key would be overwritten.
+ - When V(warn) and V(ignore) are used, note that it is important to know that the variables
are sorted by name before being merged. Keys for later variables in this order will overwrite
keys of the same name for variables earlier in this order. To avoid potential confusion,
- better use I(override=error) whenever possible.
+ better use O(override=error) whenever possible.
type: str
default: 'error'
choices:
@@ -61,6 +62,13 @@ DOCUMENTATION = """
ini:
- section: merge_variables_lookup
key: override
+ groups:
+ description:
+      - Search for variables across hosts that belong to the given groups. This allows collecting configuration pieces
+        across different hosts (for example a service on a host with its database on another host).
+ type: list
+ elements: str
+ version_added: 8.5.0
"""
EXAMPLES = """
@@ -131,22 +139,39 @@ def _verify_and_get_type(variable):
class LookupModule(LookupBase):
-
def run(self, terms, variables=None, **kwargs):
self.set_options(direct=kwargs)
initial_value = self.get_option("initial_value", None)
self._override = self.get_option('override', 'error')
self._pattern_type = self.get_option('pattern_type', 'regex')
+ self._groups = self.get_option('groups', None)
ret = []
for term in terms:
if not isinstance(term, str):
raise AnsibleError("Non-string type '{0}' passed, only 'str' types are allowed!".format(type(term)))
- ret.append(self._merge_vars(term, initial_value, variables))
+ if not self._groups: # consider only own variables
+ ret.append(self._merge_vars(term, initial_value, variables))
+ else: # consider variables of hosts in given groups
+ cross_host_merge_result = initial_value
+ for host in variables["hostvars"]:
+ if self._is_host_in_allowed_groups(variables["hostvars"][host]["group_names"]):
+ cross_host_merge_result = self._merge_vars(term, cross_host_merge_result, variables["hostvars"][host])
+ ret.append(cross_host_merge_result)
return ret
+ def _is_host_in_allowed_groups(self, host_groups):
+ if 'all' in self._groups:
+ return True
+
+ group_intersection = [host_group_name for host_group_name in host_groups if host_group_name in self._groups]
+ if group_intersection:
+ return True
+
+ return False
+
def _var_matches(self, key, search_pattern):
if self._pattern_type == "prefix":
return key.startswith(search_pattern)
@@ -162,7 +187,6 @@ class LookupModule(LookupBase):
display.vvv("Merge variables with {0}: {1}".format(self._pattern_type, search_pattern))
var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)])
display.vvv("The following variables will be merged: {0}".format(var_merge_names))
-
prev_var_type = None
result = None
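
A sketch of the new groups option; the suffix pattern and group names are hypothetical, and hostvars for the listed groups must be available (for example by targeting those hosts in the play).

- name: Merge all variables ending in __config across the hosts in the db and web groups
  ansible.builtin.debug:
    msg: "{{ lookup('community.general.merge_variables', '__config', pattern_type='suffix', groups=['db', 'web']) }}"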
diff --git a/ansible_collections/community/general/plugins/lookup/onepassword.py b/ansible_collections/community/general/plugins/lookup/onepassword.py
index 0e78e4b1a..8ca95de0b 100644
--- a/ansible_collections/community/general/plugins/lookup/onepassword.py
+++ b/ansible_collections/community/general/plugins/lookup/onepassword.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# Copyright (c) 2018, Scott Buchanan <scott@buchanan.works>
# Copyright (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -14,48 +14,28 @@ DOCUMENTATION = '''
- Scott Buchanan (@scottsb)
- Andrew Zenk (@azenk)
- Sam Doran (@samdoran)
- requirements:
- - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
- short_description: fetch field values from 1Password
+ short_description: Fetch field values from 1Password
description:
- - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password.
+ - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password.
+ requirements:
+ - C(op) 1Password command line utility
options:
_terms:
- description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve.
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
required: true
- field:
- description: field to return from each matching item (case-insensitive).
- default: 'password'
- master_password:
- description: The password used to unlock the specified vault.
- aliases: ['vault_password']
- section:
- description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ account_id:
+ version_added: 7.5.0
domain:
- description: Domain of 1Password.
version_added: 3.2.0
- default: '1password.com'
+ field:
+ description: Field to return from each matching item (case-insensitive).
+ default: 'password'
type: str
- subdomain:
- description: The 1Password subdomain to authenticate against.
- username:
- description: The username used to sign in.
- secret_key:
- description: The secret key used when performing an initial sign in.
- vault:
- description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
- notes:
- - This lookup will use an existing 1Password session if one exists. If not, and you have already
- performed an initial sign in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the
- C(master_password) is required. You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
- - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
- - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
- needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
- to the 1Password master password.
- - This lookup stores potentially sensitive data from 1Password as Ansible facts.
- Facts are subject to caching if enabled, which means this data could be stored in clear text
- on disk or in a database.
- - Tested with C(op) version 2.7.2
+ service_account_token:
+ version_added: 7.1.0
+ extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
'''
EXAMPLES = """
@@ -74,24 +54,30 @@ EXAMPLES = """
- name: Retrieve password for HAL when not signed in to 1Password
ansible.builtin.debug:
- var: lookup('community.general.onepassword'
- 'HAL 9000'
- subdomain='Discovery'
+ var: lookup('community.general.onepassword',
+ 'HAL 9000',
+ subdomain='Discovery',
master_password=vault_master_password)
- name: Retrieve password for HAL when never signed in to 1Password
ansible.builtin.debug:
- var: lookup('community.general.onepassword'
- 'HAL 9000'
- subdomain='Discovery'
- master_password=vault_master_password
- username='tweety@acme.com'
+ var: lookup('community.general.onepassword',
+ 'HAL 9000',
+ subdomain='Discovery',
+ master_password=vault_master_password,
+ username='tweety@acme.com',
secret_key=vault_secret_key)
+
+- name: Retrieve password from specific account
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword',
+ 'HAL 9000',
+ account_id='abc123')
"""
RETURN = """
_raw:
- description: field data requested
+ description: Field data requested.
type: list
elements: str
"""
@@ -102,7 +88,7 @@ import json
import subprocess
from ansible.plugins.lookup import LookupBase
-from ansible.errors import AnsibleLookupError
+from ansible.errors import AnsibleLookupError, AnsibleOptionsError
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.module_utils.six import with_metaclass
@@ -110,15 +96,38 @@ from ansible.module_utils.six import with_metaclass
from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig
+def _lower_if_possible(value):
+ """Return the lower case version value, otherwise return the value"""
+ try:
+ return value.lower()
+ except AttributeError:
+ return value
+
+
class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)):
bin = "op"
- def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None):
+ def __init__(
+ self,
+ subdomain=None,
+ domain="1password.com",
+ username=None,
+ secret_key=None,
+ master_password=None,
+ service_account_token=None,
+ account_id=None,
+ connect_host=None,
+ connect_token=None,
+ ):
self.subdomain = subdomain
self.domain = domain
self.username = username
self.master_password = master_password
self.secret_key = secret_key
+ self.service_account_token = service_account_token
+ self.account_id = account_id
+ self.connect_host = connect_host
+ self.connect_token = connect_token
self._path = None
self._version = None
@@ -286,7 +295,9 @@ class OnePassCLIv1(OnePassCLIBase):
def assert_logged_in(self):
args = ["get", "account"]
- if self.subdomain:
+ if self.account_id:
+ args.extend(["--account", self.account_id])
+ elif self.subdomain:
account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
args.extend(["--account", account])
@@ -295,6 +306,14 @@ class OnePassCLIv1(OnePassCLIBase):
return not bool(rc)
def full_signin(self):
+ if self.connect_host or self.connect_token:
+ raise AnsibleLookupError(
+ "1Password Connect is not available with 1Password CLI version 1. Please use version 2 or later.")
+
+ if self.service_account_token:
+ raise AnsibleLookupError(
+ "1Password CLI version 1 does not support Service Accounts. Please use version 2 or later.")
+
required_params = [
"subdomain",
"username",
@@ -315,6 +334,10 @@ class OnePassCLIv1(OnePassCLIBase):
def get_raw(self, item_id, vault=None, token=None):
args = ["get", "item", item_id]
+
+ if self.account_id:
+ args.extend(["--account", self.account_id])
+
if vault is not None:
args += ["--vault={0}".format(vault)]
@@ -442,6 +465,7 @@ class OnePassCLIv2(OnePassCLIBase):
}
"""
data = json.loads(data_json)
+ field_name = _lower_if_possible(field_name)
for field in data.get("fields", []):
if section_title is None:
# If the field name exists in the section, return that value
@@ -450,28 +474,40 @@ class OnePassCLIv2(OnePassCLIBase):
# If the field name doesn't exist in the section, match on the value of "label"
# then "id" and return "value"
- if field.get("label") == field_name:
- return field["value"]
+ if field.get("label", "").lower() == field_name:
+ return field.get("value", "")
- if field.get("id") == field_name:
- return field["value"]
+ if field.get("id", "").lower() == field_name:
+ return field.get("value", "")
- # Look at the section data and get an indentifier. The value of 'id' is either a unique ID
+ # Look at the section data and get an identifier. The value of 'id' is either a unique ID
# or a human-readable string. If a 'label' field exists, prefer that since
# it is the value visible in the 1Password UI when both 'id' and 'label' exist.
section = field.get("section", {})
- current_section_title = section.get("label", section.get("id"))
+ section_title = _lower_if_possible(section_title)
+
+ current_section_title = section.get("label", section.get("id", "")).lower()
if section_title == current_section_title:
# In the correct section. Check "label" then "id" for the desired field_name
- if field.get("label") == field_name:
- return field["value"]
+ if field.get("label", "").lower() == field_name:
+ return field.get("value", "")
- if field.get("id") == field_name:
- return field["value"]
+ if field.get("id", "").lower() == field_name:
+ return field.get("value", "")
return ""
def assert_logged_in(self):
+ if self.connect_host and self.connect_token:
+ return True
+
+ if self.service_account_token:
+ args = ["whoami"]
+ environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token}
+ rc, out, err = self._run(args, environment_update=environment_update)
+
+ return not bool(rc)
+
args = ["account", "list"]
if self.subdomain:
account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
@@ -484,7 +520,9 @@ class OnePassCLIv2(OnePassCLIBase):
# an interactive prompt. Only run 'op account get' after first listing accounts to see
# if there are any previously configured accounts.
args = ["account", "get"]
- if self.subdomain:
+ if self.account_id:
+ args.extend(["--account", self.account_id])
+ elif self.subdomain:
account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
args.extend(["--account", account])
@@ -515,8 +553,28 @@ class OnePassCLIv2(OnePassCLIBase):
def get_raw(self, item_id, vault=None, token=None):
args = ["item", "get", item_id, "--format", "json"]
+
+ if self.account_id:
+ args.extend(["--account", self.account_id])
+
if vault is not None:
args += ["--vault={0}".format(vault)]
+
+ if self.connect_host and self.connect_token:
+ if vault is None:
+ raise AnsibleLookupError("'vault' is required with 1Password Connect")
+ environment_update = {
+ "OP_CONNECT_HOST": self.connect_host,
+ "OP_CONNECT_TOKEN": self.connect_token,
+ }
+ return self._run(args, environment_update=environment_update)
+
+ if self.service_account_token:
+ if vault is None:
+ raise AnsibleLookupError("'vault' is required with 'service_account_token'")
+ environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token}
+ return self._run(args, environment_update=environment_update)
+
if token is not None:
args += [to_bytes("--session=") + token]
@@ -533,25 +591,37 @@ class OnePassCLIv2(OnePassCLIBase):
class OnePass(object):
- def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None):
+ def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None,
+ service_account_token=None, account_id=None, connect_host=None, connect_token=None, cli_class=None):
self.subdomain = subdomain
self.domain = domain
self.username = username
self.secret_key = secret_key
self.master_password = master_password
+ self.service_account_token = service_account_token
+ self.account_id = account_id
+ self.connect_host = connect_host
+ self.connect_token = connect_token
self.logged_in = False
self.token = None
self._config = OnePasswordConfig()
- self._cli = self._get_cli_class()
+ self._cli = self._get_cli_class(cli_class)
+
+ if (self.connect_host or self.connect_token) and None in (self.connect_host, self.connect_token):
+ raise AnsibleOptionsError("connect_host and connect_token are required together")
+
+ def _get_cli_class(self, cli_class=None):
+ if cli_class is not None:
+ return cli_class(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token)
- def _get_cli_class(self):
version = OnePassCLIBase.get_current_version()
for cls in OnePassCLIBase.__subclasses__():
if cls.supports_version == version.split(".")[0]:
try:
- return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password)
+ return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token,
+ self.account_id, self.connect_host, self.connect_token)
except TypeError as e:
raise AnsibleLookupError(e)
@@ -614,8 +684,22 @@ class LookupModule(LookupBase):
username = self.get_option("username")
secret_key = self.get_option("secret_key")
master_password = self.get_option("master_password")
-
- op = OnePass(subdomain, domain, username, secret_key, master_password)
+ service_account_token = self.get_option("service_account_token")
+ account_id = self.get_option("account_id")
+ connect_host = self.get_option("connect_host")
+ connect_token = self.get_option("connect_token")
+
+ op = OnePass(
+ subdomain=subdomain,
+ domain=domain,
+ username=username,
+ secret_key=secret_key,
+ master_password=master_password,
+ service_account_token=service_account_token,
+ account_id=account_id,
+ connect_host=connect_host,
+ connect_token=connect_token,
+ )
op.assert_logged_in()
values = []
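
A hedged sketch of the service account flow added above; the vault name and the op_service_account_token variable are placeholders, and per the code a vault must be given in this mode.

- name: Retrieve a password using a 1Password service account token
  ansible.builtin.debug:
    var: lookup('community.general.onepassword', 'HAL 9000', vault='Private', service_account_token=op_service_account_token)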
diff --git a/ansible_collections/community/general/plugins/lookup/onepassword_doc.py b/ansible_collections/community/general/plugins/lookup/onepassword_doc.py
new file mode 100644
index 000000000..ab24795df
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/onepassword_doc.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: onepassword_doc
+ author:
+ - Sam Doran (@samdoran)
+ requirements:
+ - C(op) 1Password command line utility version 2 or later.
+ short_description: Fetch documents stored in 1Password
+ version_added: "8.1.0"
+ description:
+ - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password.
+ notes:
+ - The document contents are a string exactly as stored in 1Password.
+ - This plugin requires C(op) version 2 or later.
+
+ options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+
+ extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+'''
+
+EXAMPLES = """
+- name: Retrieve a private key from 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword_doc', 'Private key')
+"""
+
+RETURN = """
+ _raw:
+ description: Requested document
+ type: list
+ elements: string
+"""
+
+from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, OnePassCLIv2
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.plugins.lookup import LookupBase
+
+
+class OnePassCLIv2Doc(OnePassCLIv2):
+ def get_raw(self, item_id, vault=None, token=None):
+ args = ["document", "get", item_id]
+ if vault is not None:
+ args = [*args, "--vault={0}".format(vault)]
+
+ if self.service_account_token:
+ if vault is None:
+ raise AnsibleLookupError("'vault' is required with 'service_account_token'")
+
+ environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token}
+ return self._run(args, environment_update=environment_update)
+
+ if token is not None:
+ args = [*args, to_bytes("--session=") + token]
+
+ return self._run(args)
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+ vault = self.get_option("vault")
+ subdomain = self.get_option("subdomain")
+ domain = self.get_option("domain", "1password.com")
+ username = self.get_option("username")
+ secret_key = self.get_option("secret_key")
+ master_password = self.get_option("master_password")
+ service_account_token = self.get_option("service_account_token")
+ account_id = self.get_option("account_id")
+ connect_host = self.get_option("connect_host")
+ connect_token = self.get_option("connect_token")
+
+ op = OnePass(
+ subdomain=subdomain,
+ domain=domain,
+ username=username,
+ secret_key=secret_key,
+ master_password=master_password,
+ service_account_token=service_account_token,
+ account_id=account_id,
+ connect_host=connect_host,
+ connect_token=connect_token,
+ cli_class=OnePassCLIv2Doc,
+ )
+ op.assert_logged_in()
+
+ values = []
+ for term in terms:
+ values.append(op.get_raw(term, vault))
+
+ return values
diff --git a/ansible_collections/community/general/plugins/lookup/onepassword_raw.py b/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
index 9b87a3f61..3eef535a1 100644
--- a/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
+++ b/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
@@ -15,44 +15,23 @@ DOCUMENTATION = '''
- Andrew Zenk (@azenk)
- Sam Doran (@samdoran)
requirements:
- - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
- short_description: fetch an entire item from 1Password
+ - C(op) 1Password command line utility
+ short_description: Fetch an entire item from 1Password
description:
- - C(onepassword_raw) wraps C(op) command line utility to fetch an entire item from 1Password
+ - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password.
options:
_terms:
- description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve.
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
required: true
- master_password:
- description: The password used to unlock the specified vault.
- aliases: ['vault_password']
- section:
- description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
- subdomain:
- description: The 1Password subdomain to authenticate against.
+ account_id:
+ version_added: 7.5.0
domain:
- description: Domain of 1Password.
version_added: 6.0.0
- default: '1password.com'
- type: str
- username:
- description: The username used to sign in.
- secret_key:
- description: The secret key used when performing an initial sign in.
- vault:
- description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
- notes:
- - This lookup will use an existing 1Password session if one exists. If not, and you have already
- performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required.
- You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
- - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
- - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
- needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
- to the 1Password master password.
- - This lookup stores potentially sensitive data from 1Password as Ansible facts.
- Facts are subject to caching if enabled, which means this data could be stored in clear text
- on disk or in a database.
- - Tested with C(op) version 2.7.0
+ service_account_token:
+ version_added: 7.1.0
+ extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
'''
EXAMPLES = """
@@ -67,7 +46,7 @@ EXAMPLES = """
RETURN = """
_raw:
- description: field data requested
+ description: Entire item requested.
type: list
elements: dict
"""
@@ -89,8 +68,22 @@ class LookupModule(LookupBase):
username = self.get_option("username")
secret_key = self.get_option("secret_key")
master_password = self.get_option("master_password")
+ service_account_token = self.get_option("service_account_token")
+ account_id = self.get_option("account_id")
+ connect_host = self.get_option("connect_host")
+ connect_token = self.get_option("connect_token")
- op = OnePass(subdomain, domain, username, secret_key, master_password)
+ op = OnePass(
+ subdomain=subdomain,
+ domain=domain,
+ username=username,
+ secret_key=secret_key,
+ master_password=master_password,
+ service_account_token=service_account_token,
+ account_id=account_id,
+ connect_host=connect_host,
+ connect_token=connect_token,
+ )
op.assert_logged_in()
values = []
diff --git a/ansible_collections/community/general/plugins/lookup/passwordstore.py b/ansible_collections/community/general/plugins/lookup/passwordstore.py
index 7e37a3785..7a6fca7a0 100644
--- a/ansible_collections/community/general/plugins/lookup/passwordstore.py
+++ b/ansible_collections/community/general/plugins/lookup/passwordstore.py
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
- Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
It also retrieves YAML style keys stored as multilines in the passwordfile.
- To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to
- C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using I(lock=readwrite) instead.
+ C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using O(lock=readwrite) instead.
options:
_terms:
description: query key.
@@ -24,16 +24,16 @@ DOCUMENTATION = '''
directory:
description:
- The directory of the password store.
- - If I(backend=pass), the default is C(~/.password-store) is used.
- - If I(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml),
- falling back to C(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config.
+      - If O(backend=pass), the default V(~/.password-store) is used.
+ - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml),
+ falling back to V(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config.
type: path
vars:
- name: passwordstore
env:
- name: PASSWORD_STORE_DIR
create:
- description: Create the password if it does not already exist. Takes precedence over C(missing).
+ description: Create the password if it does not already exist. Takes precedence over O(missing).
type: bool
default: false
overwrite:
@@ -43,7 +43,7 @@ DOCUMENTATION = '''
umask:
description:
       - Sets the umask for the created .gpg files. The first octet must be greater than 3 (user readable).
- - Note pass' default value is C('077').
+ - Note pass' default value is V('077').
env:
- name: PASSWORD_STORE_UMASK
version_added: 1.3.0
@@ -52,7 +52,7 @@ DOCUMENTATION = '''
type: bool
default: false
subkey:
- description: Return a specific subkey of the password. When set to C(password), always returns the first line.
+ description: Return a specific subkey of the password. When set to V(password), always returns the first line.
type: str
default: password
userpass:
@@ -63,7 +63,7 @@ DOCUMENTATION = '''
type: integer
default: 16
backup:
- description: Used with C(overwrite=true). Backup the previous password in a subkey.
+ description: Used with O(overwrite=true). Backup the previous password in a subkey.
type: bool
default: false
nosymbols:
@@ -73,10 +73,10 @@ DOCUMENTATION = '''
missing:
description:
       - Preference about what to do if the password file is missing.
- - If I(create=true), the value for this option is ignored and assumed to be C(create).
- - If set to C(error), the lookup will error out if the passname does not exist.
- - If set to C(create), the passname will be created with the provided length I(length) if it does not exist.
- - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist.
+ - If O(create=true), the value for this option is ignored and assumed to be V(create).
+ - If set to V(error), the lookup will error out if the passname does not exist.
+ - If set to V(create), the passname will be created with the provided length O(length) if it does not exist.
+ - If set to V(empty) or V(warn), will return a V(none) in case the passname does not exist.
When using C(lookup) and not C(query), this will be translated to an empty string.
version_added: 3.1.0
type: str
@@ -89,9 +89,9 @@ DOCUMENTATION = '''
lock:
description:
- How to synchronize operations.
- - The default of C(write) only synchronizes write operations.
- - C(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
- - C(none) does not do any synchronization.
+ - The default of V(write) only synchronizes write operations.
+ - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
+ - V(none) does not do any synchronization.
ini:
- section: passwordstore_lookup
key: lock
@@ -104,8 +104,8 @@ DOCUMENTATION = '''
version_added: 4.5.0
locktimeout:
description:
- - Lock timeout applied when I(lock) is not C(none).
- - Time with a unit suffix, C(s), C(m), C(h) for seconds, minutes, and hours, respectively. For example, C(900s) equals C(15m).
+ - Lock timeout applied when O(lock) is not V(none).
+ - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals V(15m).
- Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
ini:
- section: passwordstore_lookup
@@ -116,8 +116,8 @@ DOCUMENTATION = '''
backend:
description:
- Specify which backend to use.
- - Defaults to C(pass), passwordstore.org's original pass utility.
- - C(gopass) support is incomplete.
+ - Defaults to V(pass), passwordstore.org's original pass utility.
+ - V(gopass) support is incomplete.
ini:
- section: passwordstore_lookup
key: backend
@@ -129,6 +129,16 @@ DOCUMENTATION = '''
- pass
- gopass
version_added: 5.2.0
+ timestamp:
+ description: Add the password generation information to the end of the file.
+ type: bool
+ default: true
+ version_added: 8.1.0
+ preserve:
+ description: Include the old (edited) password inside the pass file.
+ type: bool
+ default: true
+ version_added: 8.1.0
notes:
- The lookup supports passing all options as lookup parameters since community.general 6.0.0.
'''
@@ -386,11 +396,13 @@ class LookupModule(LookupBase):
# generate new password, insert old lines from current result and return new password
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
- msg = newpass + '\n'
- if self.passoutput[1:]:
- msg += '\n'.join(self.passoutput[1:]) + '\n'
- if self.paramvals['backup']:
- msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
+ msg = newpass
+ if self.paramvals['preserve'] or self.paramvals['timestamp']:
+ msg += '\n'
+ if self.paramvals['preserve'] and self.passoutput[1:]:
+ msg += '\n'.join(self.passoutput[1:]) + '\n'
+ if self.paramvals['timestamp'] and self.paramvals['backup']:
+ msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
try:
check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
except (subprocess.CalledProcessError) as e:
@@ -402,7 +414,9 @@ class LookupModule(LookupBase):
# use pwgen to generate the password and insert values with pass -m
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
- msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
+ msg = newpass
+ if self.paramvals['timestamp']:
+ msg += '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
try:
check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
except (subprocess.CalledProcessError) as e:
@@ -465,6 +479,8 @@ class LookupModule(LookupBase):
'backup': self.get_option('backup'),
'missing': self.get_option('missing'),
'umask': self.get_option('umask'),
+ 'timestamp': self.get_option('timestamp'),
+ 'preserve': self.get_option('preserve'),
}
def run(self, terms, variables, **kwargs):
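Editor's note: the update path above now assembles the content written back via C(pass insert -f -m) from the new timestamp/preserve options. A minimal standalone sketch of that message-assembly logic; the function and argument names are local to the sketch, not part of the module.

# Standalone sketch of the message assembled before `pass insert -f -m`,
# mirroring the preserve/timestamp/backup handling in the hunk above.
import time


def build_update_message(newpass, old_lines, old_password,
                         preserve=True, timestamp=True, backup=False):
    """Return the multi-line content written back to the pass entry."""
    msg = newpass
    if preserve or timestamp:
        msg += '\n'
    if preserve and old_lines:
        msg += '\n'.join(old_lines) + '\n'
    if timestamp and backup:
        stamp = time.strftime("%d/%m/%Y %H:%M:%S")
        msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(old_password, stamp)
    return msg


# With both options disabled, only the bare password is stored.
print(build_update_message("n3wpass", ["url: example.com"], "oldpass",
                           preserve=False, timestamp=False))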
diff --git a/ansible_collections/community/general/plugins/lookup/random_string.py b/ansible_collections/community/general/plugins/lookup/random_string.py
index 199aa1396..d3b29629d 100644
--- a/ansible_collections/community/general/plugins/lookup/random_string.py
+++ b/ansible_collections/community/general/plugins/lookup/random_string.py
@@ -16,6 +16,8 @@ DOCUMENTATION = r"""
version_added: '3.2.0'
description:
- Generates random string based upon the given constraints.
+ - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom),
+ so should be strong enough for cryptographic purposes.
options:
length:
description: The length of the string.
@@ -42,25 +44,25 @@ DOCUMENTATION = r"""
- Special characters are taken from Python standard library C(string).
See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
for which characters will be used.
- - The choice of special characters can be changed to setting I(override_special).
+ - The choice of special characters can be changed to setting O(override_special).
default: true
type: bool
min_numeric:
description:
- Minimum number of numeric characters in the string.
- - If set, overrides I(numbers=false).
+ - If set, overrides O(numbers=false).
default: 0
type: int
min_upper:
description:
- Minimum number of uppercase alphabets in the string.
- - If set, overrides I(upper=false).
+ - If set, overrides O(upper=false).
default: 0
type: int
min_lower:
description:
- Minimum number of lowercase alphabets in the string.
- - If set, overrides I(lower=false).
+ - If set, overrides O(lower=false).
default: 0
type: int
min_special:
@@ -70,14 +72,27 @@ DOCUMENTATION = r"""
type: int
override_special:
description:
- - Overide a list of special characters to use in the string.
- - If set I(min_special) should be set to a non-default value.
+ - Override a list of special characters to use in the string.
+ - If set, O(min_special) should be set to a non-default value.
type: str
override_all:
description:
- - Override all values of I(numbers), I(upper), I(lower), and I(special) with
+ - Override all values of O(numbers), O(upper), O(lower), and O(special) with
the given list of characters.
type: str
+ ignore_similar_chars:
+ description:
+ - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
+ - These characters can be configured in O(similar_chars).
+ default: false
+ type: bool
+ version_added: 7.5.0
+ similar_chars:
+ description:
+ - Override the list of characters not to be used in the string.
+ default: "il1LoO0"
+ type: str
+ version_added: 7.5.0
base64:
description:
- Returns base64 encoded string.
@@ -101,7 +116,7 @@ EXAMPLES = r"""
var: lookup('community.general.random_string', base64=True)
# Example result: ['NHZ6eWN5Qk0=']
-- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (atleast)
+- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (at least)
ansible.builtin.debug:
var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1)
# Example result: ['&Qw2|E[-']
@@ -171,9 +186,17 @@ class LookupModule(LookupBase):
length = self.get_option("length")
base64_flag = self.get_option("base64")
override_all = self.get_option("override_all")
+ ignore_similar_chars = self.get_option("ignore_similar_chars")
+ similar_chars = self.get_option("similar_chars")
values = ""
available_chars_set = ""
+ if ignore_similar_chars:
+ number_chars = "".join([sc for sc in number_chars if sc not in similar_chars])
+ lower_chars = "".join([sc for sc in lower_chars if sc not in similar_chars])
+ upper_chars = "".join([sc for sc in upper_chars if sc not in similar_chars])
+ special_chars = "".join([sc for sc in special_chars if sc not in similar_chars])
+
if override_all:
# Override all the values
available_chars_set = override_all
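Editor's note: a small standalone sketch of the O(ignore_similar_chars) filtering added above, sampling with random.SystemRandom as the plugin documentation describes; the pool names and the printed result are local to the sketch.

# Sketch: drop visually similar characters from the pools before sampling,
# as the ignore_similar_chars/similar_chars options above describe.
import string
from random import SystemRandom

similar_chars = "il1LoO0"  # default value of the new option
pools = {
    "lower": string.ascii_lowercase,
    "upper": string.ascii_uppercase,
    "numbers": string.digits,
}
filtered = {
    name: "".join(c for c in chars if c not in similar_chars)
    for name, chars in pools.items()
}

rng = SystemRandom()
candidate = "".join(rng.choice("".join(filtered.values())) for _ in range(16))
print(candidate)  # for example 'kXw7T2...' with no i/l/1/L/o/O/0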
diff --git a/ansible_collections/community/general/plugins/lookup/revbitspss.py b/ansible_collections/community/general/plugins/lookup/revbitspss.py
index 552970804..e4118e89e 100644
--- a/ansible_collections/community/general/plugins/lookup/revbitspss.py
+++ b/ansible_collections/community/general/plugins/lookup/revbitspss.py
@@ -25,7 +25,7 @@ options:
elements: string
base_url:
description:
- - This will be the base URL of the server, for example C(https://server-url-here).
+ - This will be the base URL of the server, for example V(https://server-url-here).
required: true
type: string
api_key:
@@ -100,7 +100,7 @@ class LookupModule(LookupBase):
result = []
for term in terms:
try:
- display.vvv(u"Secret Server lookup of Secret with ID %s" % term)
+ display.vvv("Secret Server lookup of Secret with ID %s" % term)
result.append({term: secret_server.get_pam_secret(term)})
except Exception as error:
raise AnsibleError("Secret Server lookup failure: %s" % error.message)
diff --git a/ansible_collections/community/general/plugins/lookup/tss.py b/ansible_collections/community/general/plugins/lookup/tss.py
index 935b5f4b4..80105ff71 100644
--- a/ansible_collections/community/general/plugins/lookup/tss.py
+++ b/ansible_collections/community/general/plugins/lookup/tss.py
@@ -13,10 +13,10 @@ short_description: Get secrets from Thycotic Secret Server
version_added: 1.0.0
description:
- Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
- Server using token authentication with I(username) and I(password) on
- the REST API at I(base_url).
+ Server using token authentication with O(username) and O(password) on
+ the REST API at O(base_url).
- When using self-signed certificates the environment variable
- C(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates
+ E(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates
(in C(.pem) format).
- For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
requirements:
@@ -26,8 +26,32 @@ options:
description: The integer ID of the secret.
required: true
type: int
+ secret_path:
+ description: Indicate the full path of the secret, including folder and secret name, when the secret ID is set to 0.
+ required: false
+ type: str
+ version_added: 7.2.0
+ fetch_secret_ids_from_folder:
+ description:
+ - Boolean flag which indicates whether the secret IDs within a folder are fetched by folder ID or not.
+ - If set to V(true), the terms are considered folder IDs. Otherwise (default), they are considered secret IDs.
+ required: false
+ type: bool
+ version_added: 7.1.0
+ fetch_attachments:
+ description:
+ - Boolean flag which indicates whether attached files are downloaded or not.
+ - The download will only happen if O(file_download_path) has been provided.
+ required: false
+ type: bool
+ version_added: 7.0.0
+ file_download_path:
+ description: Indicate the file attachment download location.
+ required: false
+ type: path
+ version_added: 7.0.0
base_url:
- description: The base URL of the server, e.g. C(https://localhost/SecretServer).
+ description: The base URL of the server, for example V(https://localhost/SecretServer).
env:
- name: TSS_BASE_URL
ini:
@@ -44,7 +68,7 @@ options:
password:
description:
- The password associated with the supplied username.
- - Required when I(token) is not provided.
+ - Required when O(token) is not provided.
env:
- name: TSS_PASSWORD
ini:
@@ -54,7 +78,7 @@ options:
default: ""
description:
- The domain with which to request the OAuth2 Access Grant.
- - Optional when I(token) is not provided.
+ - Optional when O(token) is not provided.
- Requires C(python-tss-sdk) version 1.0.0 or greater.
env:
- name: TSS_DOMAIN
@@ -66,7 +90,7 @@ options:
token:
description:
- Existing token for Thycotic authorizer.
- - If provided, I(username) and I(password) are not needed.
+ - If provided, O(username) and O(password) are not needed.
- Requires C(python-tss-sdk) version 1.0.0 or greater.
env:
- name: TSS_TOKEN
@@ -157,39 +181,101 @@ EXAMPLES = r"""
tasks:
- ansible.builtin.debug:
msg: the password is {{ secret_password }}
+
+# The private key is stored in a certificate file attached to the secret.
+# If fetch_attachments=True, the private key file is downloaded to the specified path
+# and its content is shown in the debug message.
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_attachments=True,
+ file_download_path='/home/certs',
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the private key is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['private-key']
+ }}
+
+# If fetch_secret_ids_from_folder=true, the secret IDs in the folder are fetched based on the folder ID
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_secret_ids_from_folder=true,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the secret id's are {{
+ secret
+ }}
+
+# If the secret ID is 0 and secret_path has a value, the secret is fetched by its path
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 0,
+ secret_path='\folderName\secretName',
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
"""
import abc
-
+import os
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils import six
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
try:
- from thycotic.secrets.server import SecretServer, SecretServerError
+ from delinea.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
HAS_TSS_SDK = True
+ HAS_DELINEA_SS_SDK = True
+ HAS_TSS_AUTHORIZER = True
except ImportError:
try:
- from delinea.secrets.server import SecretServer, SecretServerError
+ from thycotic.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
HAS_TSS_SDK = True
+ HAS_DELINEA_SS_SDK = False
+ HAS_TSS_AUTHORIZER = True
except ImportError:
SecretServer = None
SecretServerError = None
HAS_TSS_SDK = False
-
-try:
- from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
-
- HAS_TSS_AUTHORIZER = True
-except ImportError:
- try:
- from delinea.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
-
- HAS_TSS_AUTHORIZER = True
- except ImportError:
+ HAS_DELINEA_SS_SDK = False
PasswordGrantAuthorizer = None
DomainPasswordGrantAuthorizer = None
AccessTokenAuthorizer = None
@@ -211,13 +297,49 @@ class TSSClient(object):
else:
return TSSClientV0(**server_parameters)
- def get_secret(self, term):
+ def get_secret(self, term, secret_path, fetch_file_attachments, file_download_path):
display.debug("tss_lookup term: %s" % term)
-
secret_id = self._term_to_secret_id(term)
- display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)
+ if secret_id == 0 and secret_path:
+ fetch_secret_by_path = True
+ display.vvv(u"Secret Server lookup of Secret with path %s" % secret_path)
+ else:
+ fetch_secret_by_path = False
+ display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)
+
+ if fetch_file_attachments:
+ if fetch_secret_by_path:
+ obj = self._client.get_secret_by_path(secret_path, fetch_file_attachments)
+ else:
+ obj = self._client.get_secret(secret_id, fetch_file_attachments)
+ for i in obj['items']:
+ if file_download_path and os.path.isdir(file_download_path):
+ if i['isFile']:
+ try:
+ file_content = i['itemValue'].content
+ with open(os.path.join(file_download_path, str(obj['id']) + "_" + i['slug']), "wb") as f:
+ f.write(file_content)
+ except ValueError:
+ raise AnsibleOptionsError("Failed to download {0}".format(str(i['slug'])))
+ except AttributeError:
+ display.warning("Could not read file content for {0}".format(str(i['slug'])))
+ finally:
+ i['itemValue'] = "*** Not Valid For Display ***"
+ else:
+ raise AnsibleOptionsError("File download path does not exist")
+ return obj
+ else:
+ if fetch_secret_by_path:
+ return self._client.get_secret_by_path(secret_path, False)
+ else:
+ return self._client.get_secret_json(secret_id)
+
+ def get_secret_ids_by_folderid(self, term):
+ display.debug("tss_lookup term: %s" % term)
+ folder_id = self._term_to_folder_id(term)
+ display.vvv(u"Secret Server lookup of Secret id's with Folder ID %d" % folder_id)
- return self._client.get_secret_json(secret_id)
+ return self._client.get_secret_ids_by_folderid(folder_id)
@staticmethod
def _term_to_secret_id(term):
@@ -226,6 +348,13 @@ class TSSClient(object):
except ValueError:
raise AnsibleOptionsError("Secret ID must be an integer")
+ @staticmethod
+ def _term_to_folder_id(term):
+ try:
+ return int(term)
+ except ValueError:
+ raise AnsibleOptionsError("Folder ID must be an integer")
+
class TSSClientV0(TSSClient):
def __init__(self, **server_parameters):
@@ -294,6 +423,20 @@ class LookupModule(LookupBase):
)
try:
- return [tss.get_secret(term) for term in terms]
+ if self.get_option("fetch_secret_ids_from_folder"):
+ if HAS_DELINEA_SS_SDK:
+ return [tss.get_secret_ids_by_folderid(term) for term in terms]
+ else:
+ raise AnsibleError("latest python-tss-sdk must be installed to use this plugin")
+ else:
+ return [
+ tss.get_secret(
+ term,
+ self.get_option("secret_path"),
+ self.get_option("fetch_attachments"),
+ self.get_option("file_download_path"),
+ )
+ for term in terms
+ ]
except SecretServerError as error:
raise AnsibleError("Secret Server lookup failure: %s" % error.message)
diff --git a/ansible_collections/community/general/plugins/module_utils/cmd_runner.py b/ansible_collections/community/general/plugins/module_utils/cmd_runner.py
index 21d61a6a5..864987120 100644
--- a/ansible_collections/community/general/plugins/module_utils/cmd_runner.py
+++ b/ansible_collections/community/general/plugins/module_utils/cmd_runner.py
@@ -6,6 +6,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
+import os
from functools import wraps
from ansible.module_utils.common.collections import is_sequence
@@ -147,6 +148,11 @@ class _Format(object):
@staticmethod
def as_default_type(_type, arg="", ignore_none=None):
+ #
+ # DEPRECATION: This method is deprecated and will be removed in community.general 10.0.0
+ #
+ # Instead of relying on the implicit formats provided here, use the appropriate explicit format method.
+ #
fmt = _Format
if _type == "dict":
return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)], ignore_none=ignore_none)
@@ -199,11 +205,16 @@ class CmdRunner(object):
environ_update = {}
self.environ_update = environ_update
- self.command[0] = module.get_bin_path(self.command[0], opt_dirs=path_prefix, required=True)
+ _cmd = self.command[0]
+ self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True)
for mod_param_name, spec in iteritems(module.argument_spec):
if mod_param_name not in self.arg_formats:
- self.arg_formats[mod_param_name] = _Format.as_default_type(spec['type'], mod_param_name)
+ self.arg_formats[mod_param_name] = _Format.as_default_type(spec.get('type', 'str'), mod_param_name)
+
+ @property
+ def binary(self):
+ return self.command[0]
def __call__(self, args_order=None, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, **kwargs):
if output_process is None:
@@ -309,11 +320,3 @@ class _CmdRunnerContext(object):
cmd_runner_fmt = _Format()
-
-#
-# The fmt form is deprecated and will be removed in community.general 7.0.0
-# Please use:
-# cmd_runner_fmt
-# Or, to retain the same effect, use:
-# from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt as fmt
-fmt = cmd_runner_fmt
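Editor's note: the CmdRunner change above stops forcing every command through get_bin_path when the caller already supplies a path. A standalone sketch of that resolution rule; resolve_binary and the stubbed lookup are illustration-only names, with module.get_bin_path replaced by a fake.

# Sketch of the binary-resolution rule introduced above: paths are used as-is,
# bare names are still resolved (get_bin_path is stubbed for the sketch).
import os


def resolve_binary(cmd, get_bin_path):
    if os.path.isabs(cmd) or '/' in cmd:
        return cmd
    return get_bin_path(cmd)


def fake_get_bin_path(name):
    # stand-in for module.get_bin_path in this sketch
    return "/usr/bin/" + name


print(resolve_binary("/opt/tool/bin/tool", fake_get_bin_path))  # used as-is
print(resolve_binary("./tool", fake_get_bin_path))              # used as-is (contains '/')
print(resolve_binary("tool", fake_get_bin_path))                # -> /usr/bin/tool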
diff --git a/ansible_collections/community/general/plugins/module_utils/consul.py b/ansible_collections/community/general/plugins/module_utils/consul.py
new file mode 100644
index 000000000..68c1a130b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/consul.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Håkon Lerring
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import copy
+import json
+
+from ansible.module_utils.six.moves.urllib import error as urllib_error
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+
+def get_consul_url(configuration):
+ return "%s://%s:%s/v1" % (
+ configuration.scheme,
+ configuration.host,
+ configuration.port,
+ )
+
+
+def get_auth_headers(configuration):
+ if configuration.token is None:
+ return {}
+ else:
+ return {"X-Consul-Token": configuration.token}
+
+
+class RequestError(Exception):
+ def __init__(self, status, response_data=None):
+ self.status = status
+ self.response_data = response_data
+
+ def __str__(self):
+ if self.response_data is None:
+ # self.status is already the message (backwards compat)
+ return self.status
+ return "HTTP %d: %s" % (self.status, self.response_data)
+
+
+def handle_consul_response_error(response):
+ if 400 <= response.status_code < 600:
+ raise RequestError("%d %s" % (response.status_code, response.content))
+
+
+AUTH_ARGUMENTS_SPEC = dict(
+ host=dict(default="localhost"),
+ port=dict(type="int", default=8500),
+ scheme=dict(default="http"),
+ validate_certs=dict(type="bool", default=True),
+ token=dict(no_log=True),
+ ca_path=dict(),
+)
+
+
+def camel_case_key(key):
+ parts = []
+ for part in key.split("_"):
+ if part in {"id", "ttl", "jwks", "jwt", "oidc", "iam", "sts"}:
+ parts.append(part.upper())
+ else:
+ parts.append(part.capitalize())
+ return "".join(parts)
+
+
+STATE_PARAMETER = "state"
+STATE_PRESENT = "present"
+STATE_ABSENT = "absent"
+
+OPERATION_READ = "read"
+OPERATION_CREATE = "create"
+OPERATION_UPDATE = "update"
+OPERATION_DELETE = "remove"
+
+
+def _normalize_params(params, arg_spec):
+ final_params = {}
+ for k, v in params.items():
+ if k not in arg_spec: # Alias
+ continue
+ spec = arg_spec[k]
+ if (
+ spec.get("type") == "list"
+ and spec.get("elements") == "dict"
+ and spec.get("options")
+ and v
+ ):
+ v = [_normalize_params(d, spec["options"]) for d in v]
+ elif spec.get("type") == "dict" and spec.get("options") and v:
+ v = _normalize_params(v, spec["options"])
+ final_params[k] = v
+ return final_params
+
+
+class _ConsulModule:
+ """Base class for Consul modules.
+
+ This class is considered private, till the API is fully fleshed out.
+ As such backwards incompatible changes can occur even in bugfix releases.
+ """
+
+ api_endpoint = None # type: str
+ unique_identifier = None # type: str
+ result_key = None # type: str
+ create_only_fields = set()
+ params = {}
+
+ def __init__(self, module):
+ self._module = module
+ self.params = _normalize_params(module.params, module.argument_spec)
+ self.api_params = {
+ k: camel_case_key(k)
+ for k in self.params
+ if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC
+ }
+
+ def execute(self):
+ obj = self.read_object()
+
+ changed = False
+ diff = {}
+ if self.params[STATE_PARAMETER] == STATE_PRESENT:
+ obj_from_module = self.module_to_obj(obj is not None)
+ if obj is None:
+ operation = OPERATION_CREATE
+ new_obj = self.create_object(obj_from_module)
+ diff = {"before": {}, "after": new_obj}
+ changed = True
+ else:
+ operation = OPERATION_UPDATE
+ if self._needs_update(obj, obj_from_module):
+ new_obj = self.update_object(obj, obj_from_module)
+ diff = {"before": obj, "after": new_obj}
+ changed = True
+ else:
+ new_obj = obj
+ elif self.params[STATE_PARAMETER] == STATE_ABSENT:
+ operation = OPERATION_DELETE
+ if obj is not None:
+ self.delete_object(obj)
+ changed = True
+ diff = {"before": obj, "after": {}}
+ else:
+ diff = {"before": {}, "after": {}}
+ new_obj = None
+ else:
+ raise RuntimeError("Unknown state supplied.")
+
+ result = {"changed": changed}
+ if changed:
+ result["operation"] = operation
+ if self._module._diff:
+ result["diff"] = diff
+ if self.result_key:
+ result[self.result_key] = new_obj
+ self._module.exit_json(**result)
+
+ def module_to_obj(self, is_update):
+ obj = {}
+ for k, v in self.params.items():
+ result = self.map_param(k, v, is_update)
+ if result:
+ obj[result[0]] = result[1]
+ return obj
+
+ def map_param(self, k, v, is_update):
+ def helper(item):
+ return {camel_case_key(k): v for k, v in item.items()}
+
+ def needs_camel_case(k):
+ spec = self._module.argument_spec[k]
+ return (
+ spec.get("type") == "list"
+ and spec.get("elements") == "dict"
+ and spec.get("options")
+ ) or (spec.get("type") == "dict" and spec.get("options"))
+
+ if k in self.api_params and v is not None:
+ if isinstance(v, dict) and needs_camel_case(k):
+ v = helper(v)
+ elif isinstance(v, (list, tuple)) and needs_camel_case(k):
+ v = [helper(i) for i in v]
+ if is_update and k in self.create_only_fields:
+ return
+ return camel_case_key(k), v
+
+ def _needs_update(self, api_obj, module_obj):
+ api_obj = copy.deepcopy(api_obj)
+ module_obj = copy.deepcopy(module_obj)
+ return self.needs_update(api_obj, module_obj)
+
+ def needs_update(self, api_obj, module_obj):
+ for k, v in module_obj.items():
+ if k not in api_obj:
+ return True
+ if api_obj[k] != v:
+ return True
+ return False
+
+ def prepare_object(self, existing, obj):
+ operational_attributes = {"CreateIndex", "CreateTime", "Hash", "ModifyIndex"}
+ existing = {
+ k: v for k, v in existing.items() if k not in operational_attributes
+ }
+ for k, v in obj.items():
+ existing[k] = v
+ return existing
+
+ def endpoint_url(self, operation, identifier=None):
+ if operation == OPERATION_CREATE:
+ return self.api_endpoint
+ elif identifier:
+ return "/".join([self.api_endpoint, identifier])
+ raise RuntimeError("invalid arguments passed")
+
+ def read_object(self):
+ url = self.endpoint_url(OPERATION_READ, self.params.get(self.unique_identifier))
+ try:
+ return self.get(url)
+ except RequestError as e:
+ if e.status == 404:
+ return
+ elif e.status == 403 and b"ACL not found" in e.response_data:
+ return
+ raise
+
+ def create_object(self, obj):
+ if self._module.check_mode:
+ return obj
+ else:
+ return self.put(self.api_endpoint, data=self.prepare_object({}, obj))
+
+ def update_object(self, existing, obj):
+ url = self.endpoint_url(
+ OPERATION_UPDATE, existing.get(camel_case_key(self.unique_identifier))
+ )
+ merged_object = self.prepare_object(existing, obj)
+ if self._module.check_mode:
+ return merged_object
+ else:
+ return self.put(url, data=merged_object)
+
+ def delete_object(self, obj):
+ if self._module.check_mode:
+ return {}
+ else:
+ url = self.endpoint_url(
+ OPERATION_DELETE, obj.get(camel_case_key(self.unique_identifier))
+ )
+ return self.delete(url)
+
+ def _request(self, method, url_parts, data=None, params=None):
+ module_params = self.params
+
+ if not isinstance(url_parts, (tuple, list)):
+ url_parts = [url_parts]
+ if params:
+ # Remove values that are None
+ params = {k: v for k, v in params.items() if v is not None}
+
+ ca_path = module_params.get("ca_path")
+ base_url = "%s://%s:%s/v1" % (
+ module_params["scheme"],
+ module_params["host"],
+ module_params["port"],
+ )
+ url = "/".join([base_url] + list(url_parts))
+
+ headers = {}
+ token = self.params.get("token")
+ if token:
+ headers["X-Consul-Token"] = token
+
+ try:
+ if data is not None:
+ data = json.dumps(data)
+ headers["Content-Type"] = "application/json"
+ if params:
+ url = "%s?%s" % (url, urlencode(params))
+ response = open_url(
+ url,
+ method=method,
+ data=data,
+ headers=headers,
+ validate_certs=module_params["validate_certs"],
+ ca_path=ca_path,
+ )
+ response_data = response.read()
+ status = (
+ response.status if hasattr(response, "status") else response.getcode()
+ )
+
+ except urllib_error.URLError as e:
+ if isinstance(e, urllib_error.HTTPError):
+ status = e.code
+ response_data = e.fp.read()
+ else:
+ self._module.fail_json(
+ msg="Could not connect to consul agent at %s:%s, error was %s"
+ % (module_params["host"], module_params["port"], str(e))
+ )
+ raise
+
+ if 400 <= status < 600:
+ raise RequestError(status, response_data)
+
+ return json.loads(response_data)
+
+ def get(self, url_parts, **kwargs):
+ return self._request("GET", url_parts, **kwargs)
+
+ def put(self, url_parts, **kwargs):
+ return self._request("PUT", url_parts, **kwargs)
+
+ def delete(self, url_parts, **kwargs):
+ return self._request("DELETE", url_parts, **kwargs)
diff --git a/ansible_collections/community/general/plugins/module_utils/dimensiondata.py b/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
index 0300f6c1e..a5201f7de 100644
--- a/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
+++ b/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
@@ -39,7 +39,7 @@ except ImportError:
LIBCLOUD_IMP_ERR = traceback.format_exc()
HAS_LIBCLOUD = False
-# MCP 2.x version patten for location (datacenter) names.
+# MCP 2.x version pattern for location (datacenter) names.
#
# Note that this is not a totally reliable way of determining MCP version.
# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
diff --git a/ansible_collections/community/general/plugins/module_utils/gio_mime.py b/ansible_collections/community/general/plugins/module_utils/gio_mime.py
new file mode 100644
index 000000000..e01709487
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/gio_mime.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+def gio_mime_runner(module, **kwargs):
+ return CmdRunner(
+ module,
+ command=['gio', 'mime'],
+ arg_formats=dict(
+ mime_type=cmd_runner_fmt.as_list(),
+ handler=cmd_runner_fmt.as_list(),
+ ),
+ **kwargs
+ )
+
+
+def gio_mime_get(runner, mime_type):
+ def process(rc, out, err):
+ if err.startswith("No default applications for"):
+ return None
+ out = out.splitlines()[0]
+ return out.split()[-1]
+
+ with runner("mime_type", output_process=process) as ctx:
+ return ctx.run(mime_type=mime_type)
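Editor's note: a minimal sketch of the output handling in gio_mime_get above, exercised against sample strings shaped like C(gio mime <type>) output; the sample text is an assumption for illustration, not captured command output.

# Sketch of the output parsing in gio_mime_get above; the sample strings are
# assumptions about what `gio mime <type>` prints, not captured output.
def process(rc, out, err):
    if err.startswith("No default applications for"):
        return None
    out = out.splitlines()[0]
    return out.split()[-1]


print(process(0, "Default application for 'text/html': firefox.desktop\n", ""))
# -> firefox.desktop
print(process(2, "", "No default applications for 'application/x-unknown'\n"))
# -> None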
diff --git a/ansible_collections/community/general/plugins/module_utils/gitlab.py b/ansible_collections/community/general/plugins/module_utils/gitlab.py
index 7cb59e4c2..f9872b877 100644
--- a/ansible_collections/community/general/plugins/module_utils/gitlab.py
+++ b/ansible_collections/community/general/plugins/module_utils/gitlab.py
@@ -10,6 +10,7 @@ __metaclass__ = type
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six import integer_types, string_types
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
@@ -20,19 +21,35 @@ except ImportError:
import traceback
+
+def _determine_list_all_kwargs(version):
+ gitlab_version = LooseVersion(version)
+ if gitlab_version >= LooseVersion('4.0.0'):
+ # 4.0.0 removed 'as_list'
+ return {'iterator': True, 'per_page': 100}
+ elif gitlab_version >= LooseVersion('3.7.0'):
+ # 3.7.0 added 'get_all'
+ return {'as_list': False, 'get_all': True, 'per_page': 100}
+ else:
+ return {'as_list': False, 'all': True, 'per_page': 100}
+
+
GITLAB_IMP_ERR = None
try:
import gitlab
import requests
HAS_GITLAB_PACKAGE = True
+ list_all_kwargs = _determine_list_all_kwargs(gitlab.__version__)
except Exception:
gitlab = None
GITLAB_IMP_ERR = traceback.format_exc()
HAS_GITLAB_PACKAGE = False
+ list_all_kwargs = {}
def auth_argument_spec(spec=None):
arg_spec = (dict(
+ ca_path=dict(type='str'),
api_token=dict(type='str', no_log=True),
api_oauth_token=dict(type='str', no_log=True),
api_job_token=dict(type='str', no_log=True),
@@ -57,11 +74,11 @@ def find_project(gitlab_instance, identifier):
def find_group(gitlab_instance, identifier):
try:
- project = gitlab_instance.groups.get(identifier)
+ group = gitlab_instance.groups.get(identifier)
except Exception as e:
return None
- return project
+ return group
def ensure_gitlab_package(module):
@@ -73,33 +90,36 @@ def ensure_gitlab_package(module):
def gitlab_authentication(module):
+ ensure_gitlab_package(module)
+
gitlab_url = module.params['api_url']
validate_certs = module.params['validate_certs']
+ ca_path = module.params['ca_path']
gitlab_user = module.params['api_username']
gitlab_password = module.params['api_password']
gitlab_token = module.params['api_token']
gitlab_oauth_token = module.params['api_oauth_token']
gitlab_job_token = module.params['api_job_token']
- ensure_gitlab_package(module)
+ verify = ca_path if validate_certs and ca_path else validate_certs
try:
# python-gitlab library remove support for username/password authentication since 1.13.0
# Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
# This condition allow to still support older version of the python-gitlab library
if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, email=gitlab_user, password=gitlab_password,
private_token=gitlab_token, api_version=4)
else:
# We can create an oauth_token using a username and password
# https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
if gitlab_user:
data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
- resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
+ resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify)
resp_data = resp.json()
gitlab_oauth_token = resp_data["access_token"]
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token,
oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
gitlab_instance.auth()
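Editor's note: the C(verify) value computed above is handed both to python-gitlab (ssl_verify) and to requests (verify). A tiny standalone sketch of how that one-liner resolves; the helper name is local to the sketch.

# Sketch: the ssl_verify/verify argument is either a CA bundle path or a bool,
# depending on validate_certs and ca_path, exactly as the one-liner above.
def pick_verify(validate_certs, ca_path):
    return ca_path if validate_certs and ca_path else validate_certs


assert pick_verify(True, "/etc/ssl/certs/ca-bundle.crt") == "/etc/ssl/certs/ca-bundle.crt"
assert pick_verify(True, None) is True
assert pick_verify(False, "/etc/ssl/certs/ca-bundle.crt") is False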
@@ -115,9 +135,46 @@ def gitlab_authentication(module):
def filter_returned_variables(gitlab_variables):
# pop properties we don't know
existing_variables = [dict(x.attributes) for x in gitlab_variables]
- KNOWN = ['key', 'value', 'masked', 'protected', 'variable_type', 'environment_scope']
+ KNOWN = ['key', 'value', 'masked', 'protected', 'variable_type', 'environment_scope', 'raw']
for item in existing_variables:
for key in list(item.keys()):
if key not in KNOWN:
item.pop(key)
return existing_variables
+
+
+def vars_to_variables(vars, module):
+ # transform old vars to new variables structure
+ variables = list()
+ for item, value in vars.items():
+ if isinstance(value, (string_types, integer_types, float)):
+ variables.append(
+ {
+ "name": item,
+ "value": str(value),
+ "masked": False,
+ "protected": False,
+ "raw": False,
+ "variable_type": "env_var",
+ }
+ )
+
+ elif isinstance(value, dict):
+ new_item = {
+ "name": item,
+ "value": value.get('value'),
+ "masked": value.get('masked'),
+ "protected": value.get('protected'),
+ "raw": value.get('raw'),
+ "variable_type": value.get('variable_type'),
+ }
+
+ if value.get('environment_scope'):
+ new_item['environment_scope'] = value.get('environment_scope')
+
+ variables.append(new_item)
+
+ else:
+ module.fail_json(msg="value must be of type string, integer, float or dict")
+
+ return variables
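Editor's note: a short illustration of the transformation vars_to_variables above performs on the legacy C(vars) mapping. This is a trimmed re-implementation for demonstration only: the module.fail_json branch for unsupported value types is left out, and the input values are made up.

# Trimmed sketch of the vars -> variables transformation above
# (non-scalar values other than dicts would normally trigger fail_json).
def vars_to_variables_sketch(vars_dict):
    variables = []
    for name, value in vars_dict.items():
        if isinstance(value, dict):
            item = {
                "name": name,
                "value": value.get("value"),
                "masked": value.get("masked"),
                "protected": value.get("protected"),
                "raw": value.get("raw"),
                "variable_type": value.get("variable_type"),
            }
            if value.get("environment_scope"):
                item["environment_scope"] = value["environment_scope"]
        else:
            item = {"name": name, "value": str(value), "masked": False,
                    "protected": False, "raw": False, "variable_type": "env_var"}
        variables.append(item)
    return variables


print(vars_to_variables_sketch({"SIMPLE": 42,
                                "SCOPED": {"value": "x", "environment_scope": "production"}}))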
diff --git a/ansible_collections/community/general/plugins/module_utils/hwc_utils.py b/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
index a21cc8e48..86d29e272 100644
--- a/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
+++ b/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
@@ -203,7 +203,7 @@ class Config(object):
if url == "":
raise HwcClientException(
- 0, "Can not find the enpoint for %s" % service_type)
+ 0, "Cannot find the endpoint for %s" % service_type)
if url[-1] != "/":
url += "/"
@@ -351,7 +351,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
if pending and status not in pending:
raise HwcModuleException(
- "unexpect status(%s) occurred" % status)
+ "unexpected status(%s) occurred" % status)
if not is_last_time:
wait *= 2
@@ -362,7 +362,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
time.sleep(wait)
- raise HwcModuleException("asycn wait timeout after %d seconds" % timeout)
+ raise HwcModuleException("async wait timeout after %d seconds" % timeout)
def navigate_value(data, index, array_index=None):
diff --git a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
index 7e421f3bb..9e1c3f4d9 100644
--- a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
+++ b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
@@ -9,6 +9,7 @@ __metaclass__ = type
import json
import traceback
+import copy
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
@@ -64,11 +65,21 @@ URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id
URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
URL_USERS = "{url}/admin/realms/{realm}/users"
+URL_USER = "{url}/admin/realms/{realm}/users/{id}"
+URL_USER_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings"
+URL_USER_REALM_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm"
+URL_USER_CLIENTS_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients"
+URL_USER_CLIENT_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client_id}"
+URL_USER_GROUPS = "{url}/admin/realms/{realm}/users/{id}/groups"
+URL_USER_GROUP = "{url}/admin/realms/{realm}/users/{id}/groups/{group_id}"
+
URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user"
URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}"
URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available"
URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite"
+URL_REALM_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{group}/role-mappings/realm"
+
URL_CLIENTSECRET = "{url}/admin/realms/{realm}/clients/{id}/client-secret"
URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows"
@@ -81,6 +92,9 @@ URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication
URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority"
URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority"
URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}"
+URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION = "{url}/admin/realms/{realm}/authentication/register-required-action"
+URL_AUTHENTICATION_REQUIRED_ACTIONS = "{url}/admin/realms/{realm}/authentication/required-actions"
+URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS = "{url}/admin/realms/{realm}/authentication/required-actions/{alias}"
URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances"
URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}"
@@ -93,6 +107,20 @@ URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"
URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}"
URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope"
+# This URL is used for:
+# - Querying client authorization permissions
+# - Removing client authorization permissions
+URL_AUTHZ_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy"
+URL_AUTHZ_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{id}"
+
+URL_AUTHZ_PERMISSION = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}/{id}"
+URL_AUTHZ_PERMISSIONS = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}"
+
+URL_AUTHZ_RESOURCES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/resource"
+
+URL_AUTHZ_CUSTOM_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{policy_type}"
+URL_AUTHZ_CUSTOM_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy"
+
def keycloak_argument_spec():
"""
@@ -266,8 +294,8 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
exception=traceback.format_exc())
@@ -291,8 +319,8 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_open_url(e, msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
exception=traceback.format_exc())
@@ -312,8 +340,8 @@ class KeycloakAPI(object):
return open_url(realm_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(realmrep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_open_url(e, msg='Could not update realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
def create_realm(self, realmrep):
""" Create a realm in keycloak
@@ -326,8 +354,8 @@ class KeycloakAPI(object):
return open_url(realm_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(realmrep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
- exception=traceback.format_exc())
+ self.fail_open_url(e, msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
+ exception=traceback.format_exc())
def delete_realm(self, realm="master"):
""" Delete a realm from Keycloak
@@ -341,8 +369,8 @@ class KeycloakAPI(object):
return open_url(realm_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
+ self.fail_open_url(e, msg='Could not delete realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
def get_clients(self, realm='master', filter=None):
""" Obtains client representations for clients in a realm
@@ -363,7 +391,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain list of clients for realm %s: %s'
% (realm, str(e)))
def get_client_by_clientid(self, client_id, realm='master'):
@@ -396,7 +424,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain client %s for realm %s: %s'
% (id, realm, str(e)))
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
@@ -431,7 +459,7 @@ class KeycloakAPI(object):
return open_url(client_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(clientrep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update client %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update client %s in realm %s: %s'
% (id, realm, str(e)))
def create_client(self, clientrep, realm="master"):
@@ -446,7 +474,7 @@ class KeycloakAPI(object):
return open_url(client_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(clientrep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create client %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create client %s in realm %s: %s'
% (clientrep['clientId'], realm, str(e)))
def delete_client(self, id, realm="master"):
@@ -462,7 +490,7 @@ class KeycloakAPI(object):
return open_url(client_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not delete client %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not delete client %s in realm %s: %s'
% (id, realm, str(e)))
def get_client_roles_by_id(self, cid, realm="master"):
@@ -478,7 +506,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch rolemappings for client %s in realm %s: %s"
% (cid, realm, str(e)))
def get_client_role_id_by_name(self, cid, name, realm="master"):
@@ -513,12 +541,12 @@ class KeycloakAPI(object):
if rid == role['id']:
return role
except Exception as e:
- self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
% (cid, gid, realm, str(e)))
return None
def get_client_group_available_rolemappings(self, gid, cid, realm="master"):
- """ Fetch the available role of a client in a specified goup on the Keycloak server.
+ """ Fetch the available role of a client in a specified group on the Keycloak server.
:param gid: ID of the group from which to obtain the rolemappings.
:param cid: ID of the client from which to obtain the rolemappings.
@@ -531,7 +559,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
% (cid, gid, realm, str(e)))
def get_client_group_composite_rolemappings(self, gid, cid, realm="master"):
@@ -548,7 +576,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
% (cid, gid, realm, str(e)))
def get_role_by_id(self, rid, realm="master"):
@@ -564,7 +592,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch role for id %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch role for id %s in realm %s: %s"
% (rid, realm, str(e)))
def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"):
@@ -581,7 +609,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch role for id %s and cid %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch role for id %s and cid %s in realm %s: %s"
% (rid, cid, realm, str(e)))
def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"):
@@ -597,11 +625,43 @@ class KeycloakAPI(object):
open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(roles_rep),
validate_certs=self.validate_certs, timeout=self.connection_timeout)
except Exception as e:
- self.module.fail_json(msg="Could not assign roles to composite role %s and realm %s: %s"
+ self.fail_open_url(e, msg="Could not assign roles to composite role %s and realm %s: %s"
% (rid, realm, str(e)))
+ def add_group_realm_rolemapping(self, gid, role_rep, realm="master"):
+ """ Add the specified realm role to specified group on the Keycloak server.
+
+ :param gid: ID of the group to add the role mapping.
+ :param role_rep: Representation of the role to assign.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: None.
+ """
+ url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid)
+ try:
+ open_url(url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.fail_open_url(e, msg="Could add realm role mappings for group %s, realm %s: %s"
+ % (gid, realm, str(e)))
+
+ def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"):
+ """ Delete the specified realm role from the specified group on the Keycloak server.
+
+ :param gid: ID of the group from which to obtain the rolemappings.
+ :param role_rep: Representation of the role to assign.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: None.
+ """
+ url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid)
+ try:
+ open_url(url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.fail_open_url(e, msg="Could not delete realm role mappings for group %s, realm %s: %s"
+ % (gid, realm, str(e)))
+
def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
- """ Fetch the composite role of a client in a specified goup on the Keycloak server.
+ """ Fetch the composite role of a client in a specified group on the Keycloak server.
:param gid: ID of the group from which to obtain the rolemappings.
:param cid: ID of the client from which to obtain the rolemappings.
@@ -614,7 +674,7 @@ class KeycloakAPI(object):
open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
validate_certs=self.validate_certs, timeout=self.connection_timeout)
except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
% (cid, gid, realm, str(e)))
def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"):
@@ -631,7 +691,7 @@ class KeycloakAPI(object):
open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
validate_certs=self.validate_certs, timeout=self.connection_timeout)
except Exception as e:
- self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
% (cid, gid, realm, str(e)))
def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'):
@@ -652,7 +712,7 @@ class KeycloakAPI(object):
if rid == role['id']:
return role
except Exception as e:
- self.module.fail_json(msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
% (cid, uid, realm, str(e)))
return None
@@ -670,7 +730,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s"
% (cid, uid, realm, str(e)))
def get_client_user_composite_rolemappings(self, uid, cid, realm="master"):
@@ -687,7 +747,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
% (uid, realm, str(e)))
def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'):
@@ -707,7 +767,7 @@ class KeycloakAPI(object):
if rid == role['id']:
return role
except Exception as e:
- self.module.fail_json(msg="Could not fetch rolemappings for user %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch rolemappings for user %s, realm %s: %s"
% (uid, realm, str(e)))
return None
@@ -724,7 +784,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s"
% (uid, realm, str(e)))
def get_realm_user_composite_rolemappings(self, uid, realm="master"):
@@ -740,7 +800,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch effective rolemappings for user %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch effective rolemappings for user %s, realm %s: %s"
% (uid, realm, str(e)))
def get_user_by_username(self, username, realm="master"):
@@ -754,7 +814,8 @@ class KeycloakAPI(object):
users_url += '?username=%s&exact=true' % username
try:
userrep = None
- users = json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
+ users = json.loads(to_native(open_url(users_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
for user in users:
if user['username'] == username:
@@ -766,7 +827,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s'
% (realm, username, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain the user for realm %s and username %s: %s'
+ self.fail_open_url(e, msg='Could not obtain the user for realm %s and username %s: %s'
% (realm, username, str(e)))
def get_service_account_user_by_client_id(self, client_id, realm="master"):
@@ -780,13 +841,14 @@ class KeycloakAPI(object):
service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid)
try:
- return json.loads(to_native(open_url(service_account_user_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
+ return json.loads(to_native(open_url(service_account_user_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except ValueError as e:
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s'
% (realm, client_id, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain the service-account-user for realm %s and client_id %s: %s'
+ self.fail_open_url(e, msg='Could not obtain the service-account-user for realm %s and client_id %s: %s'
% (realm, client_id, str(e)))
def add_user_rolemapping(self, uid, cid, role_rep, realm="master"):
@@ -804,7 +866,7 @@ class KeycloakAPI(object):
open_url(user_realm_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
validate_certs=self.validate_certs, timeout=self.connection_timeout)
except Exception as e:
- self.module.fail_json(msg="Could not map roles to userId %s for realm %s and roles %s: %s"
+ self.fail_open_url(e, msg="Could not map roles to userId %s for realm %s and roles %s: %s"
% (uid, realm, json.dumps(role_rep), str(e)))
else:
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
@@ -812,7 +874,7 @@ class KeycloakAPI(object):
open_url(user_client_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
validate_certs=self.validate_certs, timeout=self.connection_timeout)
except Exception as e:
- self.module.fail_json(msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
+ self.fail_open_url(e, msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
% (uid, cid, realm, json.dumps(role_rep), str(e)))
def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
@@ -830,7 +892,7 @@ class KeycloakAPI(object):
open_url(user_realm_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
validate_certs=self.validate_certs, timeout=self.connection_timeout)
except Exception as e:
- self.module.fail_json(msg="Could not remove roles %s from userId %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not remove roles %s from userId %s, realm %s: %s"
% (json.dumps(role_rep), uid, realm, str(e)))
else:
user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
@@ -838,7 +900,7 @@ class KeycloakAPI(object):
open_url(user_client_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
validate_certs=self.validate_certs, timeout=self.connection_timeout)
except Exception as e:
- self.module.fail_json(msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
+ self.fail_open_url(e, msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
% (json.dumps(role_rep), cid, uid, realm, str(e)))
def get_client_templates(self, realm='master'):
@@ -856,7 +918,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain list of client templates for realm %s: %s'
% (realm, str(e)))
def get_client_template_by_id(self, id, realm='master'):
@@ -875,7 +937,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s'
% (id, realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain client template %s for realm %s: %s'
% (id, realm, str(e)))
def get_client_template_by_name(self, name, realm='master'):
@@ -918,7 +980,7 @@ class KeycloakAPI(object):
return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(clienttrep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update client template %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update client template %s in realm %s: %s'
% (id, realm, str(e)))
def create_client_template(self, clienttrep, realm="master"):
@@ -933,7 +995,7 @@ class KeycloakAPI(object):
return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(clienttrep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create client template %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create client template %s in realm %s: %s'
% (clienttrep['clientId'], realm, str(e)))
def delete_client_template(self, id, realm="master"):
@@ -949,7 +1011,7 @@ class KeycloakAPI(object):
return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not delete client template %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not delete client template %s in realm %s: %s'
% (id, realm, str(e)))
def get_clientscopes(self, realm="master"):
@@ -967,7 +1029,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch list of clientscopes in realm %s: %s"
% (realm, str(e)))
def get_clientscope_by_clientscopeid(self, cid, realm="master"):
@@ -989,7 +1051,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch clientscope %s in realm %s: %s"
% (cid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s"
@@ -1030,7 +1092,7 @@ class KeycloakAPI(object):
return open_url(clientscopes_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not create clientscope %s in realm %s: %s"
% (clientscoperep['name'], realm, str(e)))
def update_clientscope(self, clientscoperep, realm="master"):
@@ -1046,7 +1108,7 @@ class KeycloakAPI(object):
data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update clientscope %s in realm %s: %s'
% (clientscoperep['name'], realm, str(e)))
def delete_clientscope(self, name=None, cid=None, realm="master"):
@@ -1084,7 +1146,7 @@ class KeycloakAPI(object):
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
+ self.fail_open_url(e, msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
def get_clientscope_protocolmappers(self, cid, realm="master"):
""" Fetch the name and ID of all clientscopes on the Keycloak server.
@@ -1102,7 +1164,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch list of protocolmappers in realm %s: %s"
% (realm, str(e)))
def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"):
@@ -1126,7 +1188,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch protocolmapper %s in realm %s: %s"
% (pid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
@@ -1169,7 +1231,7 @@ class KeycloakAPI(object):
return open_url(protocolmappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not create protocolmapper %s in realm %s: %s"
% (mapper_rep['name'], realm, str(e)))
def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"):
@@ -1186,7 +1248,7 @@ class KeycloakAPI(object):
data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
% (mapper_rep, realm, str(e)))
def get_default_clientscopes(self, realm, client_id=None):
@@ -1210,7 +1272,7 @@ class KeycloakAPI(object):
:param realm: Realm in which the clientscope resides.
:param client_id: The client in which the clientscope resides.
- :return The optinal clientscopes of this realm or client
+ :return The optional clientscopes of this realm or client
"""
url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
return self._get_clientscopes_of_type(realm, url, 'optional', client_id)
@@ -1223,7 +1285,7 @@ class KeycloakAPI(object):
:param realm: Realm in which the clientscope resides.
:param url_template the template for the right type
- :param scope_type this can be either optinal or default
+ :param scope_type this can be either optional or default
:param client_id: The client in which the clientscope resides.
:return The clientscopes of the specified type of this realm
"""
@@ -1233,7 +1295,7 @@ class KeycloakAPI(object):
return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
+ self.fail_open_url(e, msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
else:
cid = self.get_client_id(client_id=client_id, realm=realm)
clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
@@ -1241,11 +1303,11 @@ class KeycloakAPI(object):
return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, clientscopes_url))
+ self.fail_open_url(e, msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, clientscopes_url))
def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
"""Decides which url to use.
- :param scope_type this can be either optinal or default
+ :param scope_type this can be either optional or default
:param client_id: The client in which the clientscope resides.
"""
if client_id is None:
@@ -1312,7 +1374,7 @@ class KeycloakAPI(object):
except Exception as e:
place = 'realm' if client_id is None else 'client ' + client_id
- self.module.fail_json(msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))
+ self.fail_open_url(e, msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))
def create_clientsecret(self, id, realm="master"):
""" Generate a new client secret by id
@@ -1324,14 +1386,15 @@ class KeycloakAPI(object):
clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
try:
- return json.loads(to_native(open_url(clientsecret_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
+ return json.loads(to_native(open_url(clientsecret_url, method='POST', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except HTTPError as e:
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain clientsecret of client %s for realm %s: %s'
% (id, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
@@ -1347,14 +1410,15 @@ class KeycloakAPI(object):
clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
try:
- return json.loads(to_native(open_url(clientsecret_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
+ return json.loads(to_native(open_url(clientsecret_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except HTTPError as e:
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain clientsecret of client %s for realm %s: %s'
% (id, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
@@ -1374,7 +1438,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch list of groups in realm %s: %s"
% (realm, str(e)))
def get_group_by_groupid(self, gid, realm="master"):
@@ -1395,7 +1459,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not fetch group %s in realm %s: %s"
% (gid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
@@ -1490,7 +1554,7 @@ class KeycloakAPI(object):
def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None):
""" Get keycloak direct parent group API object for a given chain of parents.
- To succesfully work the API for subgroups we actually dont need
+ To successfully work with the API for subgroups we actually don't need
to "walk the whole tree" for nested groups but only need to know
the ID for the direct predecessor of current subgroup. This
method will guarantee us this information getting there with
@@ -1542,7 +1606,7 @@ class KeycloakAPI(object):
return open_url(groups_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(grouprep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Could not create group %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not create group %s in realm %s: %s"
% (grouprep['name'], realm, str(e)))
def create_subgroup(self, parents, grouprep, realm="master"):
@@ -1570,7 +1634,7 @@ class KeycloakAPI(object):
return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(grouprep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Could not create subgroup %s for parent group %s in realm %s: %s"
+ self.fail_open_url(e, msg="Could not create subgroup %s for parent group %s in realm %s: %s"
% (grouprep['name'], parent_id, realm, str(e)))
def update_group(self, grouprep, realm="master"):
@@ -1585,7 +1649,7 @@ class KeycloakAPI(object):
return open_url(group_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(grouprep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update group %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update group %s in realm %s: %s'
% (grouprep['name'], realm, str(e)))
def delete_group(self, name=None, groupid=None, realm="master"):
@@ -1622,7 +1686,7 @@ class KeycloakAPI(object):
return open_url(group_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e)))
+ self.fail_open_url(e, msg="Unable to delete group %s: %s" % (groupid, str(e)))
def get_realm_roles(self, realm='master'):
""" Obtains role representations for roles in a realm
@@ -1639,7 +1703,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain list of roles for realm %s: %s'
% (realm, str(e)))
def get_realm_role(self, name, realm='master'):
@@ -1649,7 +1713,7 @@ class KeycloakAPI(object):
:param name: Name of the role to fetch.
:param realm: Realm in which the role resides; default 'master'.
"""
- role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
+ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe=''))
try:
return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
@@ -1657,7 +1721,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not fetch role %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not fetch role %s in realm %s: %s'
% (name, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch role %s in realm %s: %s'
@@ -1671,10 +1735,13 @@ class KeycloakAPI(object):
"""
roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm)
try:
+ if "composites" in rolerep:
+ keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"])
+ rolerep["composites"] = keycloak_compatible_composites
return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(rolerep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create role %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create role %s in realm %s: %s'
% (rolerep['name'], realm, str(e)))
def update_realm_role(self, rolerep, realm='master'):
@@ -1683,26 +1750,138 @@ class KeycloakAPI(object):
:param rolerep: A RoleRepresentation of the updated role.
:return HTTPResponse object on success
"""
- role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
+ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name'], safe=''))
try:
- return open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ composites = None
+ if "composites" in rolerep:
+ composites = copy.deepcopy(rolerep["composites"])
+ del rolerep["composites"]
+ role_response = open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ if composites is not None:
+ self.update_role_composites(rolerep=rolerep, composites=composites, realm=realm)
+ return role_response
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not update role %s in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def get_role_composites(self, rolerep, clientid=None, realm='master'):
+ composite_url = ''
+ try:
+ if clientid is not None:
+ client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+ cid = client['id']
+ composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+ else:
+ composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+ # Get existing composites
+ return json.loads(to_native(open_url(
+ composite_url,
+ method='GET',
+ http_agent=self.http_agent,
+ headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not get role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def create_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+ composite_url = ''
+ try:
+ if clientid is not None:
+ client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+ cid = client['id']
+ composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+ else:
+ composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+ # create the new composites
+ return open_url(composite_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(composites), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update role %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create role %s composites in realm %s: %s'
% (rolerep['name'], realm, str(e)))
+ def delete_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+ composite_url = ''
+ try:
+ if clientid is not None:
+ client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+ cid = client['id']
+ composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+ else:
+ composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+ # delete the given composites
+ return open_url(composite_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(composites), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not delete role %s composites in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def update_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+ # Get existing composites
+ existing_composites = self.get_role_composites(rolerep=rolerep, clientid=clientid, realm=realm)
+ composites_to_be_created = []
+ composites_to_be_deleted = []
+ for composite in composites:
+ composite_found = False
+ existing_composite_client = None
+ for existing_composite in existing_composites:
+ if existing_composite["clientRole"]:
+ existing_composite_client = self.get_client_by_id(existing_composite["containerId"], realm=realm)
+ if ("client_id" in composite
+ and composite['client_id'] is not None
+ and existing_composite_client["clientId"] == composite["client_id"]
+ and composite["name"] == existing_composite["name"]):
+ composite_found = True
+ break
+ else:
+ if (("client_id" not in composite or composite['client_id'] is None)
+ and composite["name"] == existing_composite["name"]):
+ composite_found = True
+ break
+ if (not composite_found and ('state' not in composite or composite['state'] == 'present')):
+ if "client_id" in composite and composite['client_id'] is not None:
+ client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm)
+ for client_role in client_roles:
+ if client_role['name'] == composite['name']:
+ composites_to_be_created.append(client_role)
+ break
+ else:
+ realm_role = self.get_realm_role(name=composite["name"], realm=realm)
+ composites_to_be_created.append(realm_role)
+ elif composite_found and 'state' in composite and composite['state'] == 'absent':
+ if "client_id" in composite and composite['client_id'] is not None:
+ client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm)
+ for client_role in client_roles:
+ if client_role['name'] == composite['name']:
+ composites_to_be_deleted.append(client_role)
+ break
+ else:
+ realm_role = self.get_realm_role(name=composite["name"], realm=realm)
+ composites_to_be_deleted.append(realm_role)
+
+ if len(composites_to_be_created) > 0:
+ # create new composites
+ self.create_role_composites(rolerep=rolerep, composites=composites_to_be_created, clientid=clientid, realm=realm)
+ if len(composites_to_be_deleted) > 0:
+ # delete composites that are no longer wanted
+ self.delete_role_composites(rolerep=rolerep, composites=composites_to_be_deleted, clientid=clientid, realm=realm)
+
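# Editor's sketch (simplified assumption): the reconciliation above boils down
# to partitioning the desired composites into "create" and "delete" sets by
# comparing (client_id, name) pairs against the existing mappings and honouring
# an optional per-entry 'state' of 'present' (default) or 'absent'.
def partition_composites(desired, existing_keys):
    """desired: dicts with 'name', optional 'client_id' and 'state'.
    existing_keys: set of (client_id or None, name) tuples already mapped."""
    to_create, to_delete = [], []
    for comp in desired:
        key = (comp.get("client_id"), comp["name"])
        if comp.get("state", "present") == "present" and key not in existing_keys:
            to_create.append(comp)
        elif comp.get("state") == "absent" and key in existing_keys:
            to_delete.append(comp)
    return to_create, to_delete


# One realm composite to add, one client composite to drop:
to_add, to_drop = partition_composites(
    [{"name": "offline_access"},
     {"name": "manage-users", "client_id": "realm-management", "state": "absent"}],
    existing_keys={("realm-management", "manage-users")})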
def delete_realm_role(self, name, realm='master'):
""" Delete a realm role.
:param name: The name of the role.
:param realm: The realm in which this role resides, default "master".
"""
- role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
+ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe=''))
try:
return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Unable to delete role %s in realm %s: %s'
+ self.fail_open_url(e, msg='Unable to delete role %s in realm %s: %s'
% (name, realm, str(e)))
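# Editor's note on the quote(..., safe='') changes in this diff: urllib's
# quote() leaves '/' unescaped by default (safe='/'), so a role name containing
# a slash would be interpreted as an extra path segment; safe='' forces full
# percent-encoding.
from urllib.parse import quote

print(quote("my/role"))           # my/role    ('/' kept, breaks the URL path)
print(quote("my/role", safe=""))  # my%2Frole  (fully encoded, safe in a path)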
def get_client_roles(self, clientid, realm='master'):
@@ -1725,7 +1904,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s'
% (clientid, realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain list of roles for client %s in realm %s: %s'
% (clientid, realm, str(e)))
def get_client_role(self, name, clientid, realm='master'):
@@ -1741,7 +1920,7 @@ class KeycloakAPI(object):
if cid is None:
self.module.fail_json(msg='Could not find client %s in realm %s'
% (clientid, realm))
- role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
+ role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe=''))
try:
return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
@@ -1749,7 +1928,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s'
+ self.fail_open_url(e, msg='Could not fetch role %s in client %s of realm %s: %s'
% (name, clientid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s'
@@ -1769,12 +1948,30 @@ class KeycloakAPI(object):
% (clientid, realm))
roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
try:
+ if "composites" in rolerep:
+ keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"])
+ rolerep["composites"] = keycloak_compatible_composites
return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(rolerep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create role %s for client %s in realm %s: %s'
% (rolerep['name'], clientid, realm, str(e)))
+ def convert_role_composites(self, composites):
+ keycloak_compatible_composites = {
+ 'client': {},
+ 'realm': []
+ }
+ for composite in composites:
+ if 'state' not in composite or composite['state'] == 'present':
+ if "client_id" in composite and composite["client_id"] is not None:
+ if composite["client_id"] not in keycloak_compatible_composites["client"]:
+ keycloak_compatible_composites["client"][composite["client_id"]] = []
+ keycloak_compatible_composites["client"][composite["client_id"]].append(composite["name"])
+ else:
+ keycloak_compatible_composites["realm"].append(composite["name"])
+ return keycloak_compatible_composites
+
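# Editor's example of the conversion performed by convert_role_composites
# above: the module's flat list of composite specs becomes the nested structure
# Keycloak expects, and entries with state 'absent' are skipped. Standalone
# restatement for illustration only; values are made up.
def convert_composites(composites):
    out = {"client": {}, "realm": []}
    for comp in composites:
        if comp.get("state", "present") != "present":
            continue
        if comp.get("client_id"):
            out["client"].setdefault(comp["client_id"], []).append(comp["name"])
        else:
            out["realm"].append(comp["name"])
    return out


assert convert_composites([
    {"name": "offline_access"},                               # realm role
    {"name": "view-users", "client_id": "realm-management"},  # client role
    {"name": "old-role", "state": "absent"},                  # ignored
]) == {"client": {"realm-management": ["view-users"]}, "realm": ["offline_access"]}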
def update_client_role(self, rolerep, clientid, realm="master"):
""" Update an existing client role.
@@ -1787,12 +1984,19 @@ class KeycloakAPI(object):
if cid is None:
self.module.fail_json(msg='Could not find client %s in realm %s'
% (clientid, realm))
- role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name']))
- try:
- return open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s'
+ role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'], safe=''))
+ try:
+ composites = None
+ if "composites" in rolerep:
+ composites = copy.deepcopy(rolerep["composites"])
+ del rolerep['composites']
+ update_role_response = open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ if composites is not None:
+ self.update_role_composites(rolerep=rolerep, clientid=clientid, composites=composites, realm=realm)
+ return update_role_response
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not update role %s for client %s in realm %s: %s'
% (rolerep['name'], clientid, realm, str(e)))
def delete_client_role(self, name, clientid, realm="master"):
@@ -1806,12 +2010,12 @@ class KeycloakAPI(object):
if cid is None:
self.module.fail_json(msg='Could not find client %s in realm %s'
% (clientid, realm))
- role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
+ role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe=''))
try:
return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s'
+ self.fail_open_url(e, msg='Unable to delete role %s for client %s in realm %s: %s'
% (name, clientid, realm, str(e)))
def get_authentication_flow_by_alias(self, alias, realm='master'):
@@ -1833,7 +2037,7 @@ class KeycloakAPI(object):
break
return authentication_flow
except Exception as e:
- self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e)))
+ self.fail_open_url(e, msg="Unable get authentication flow %s: %s" % (alias, str(e)))
def delete_authentication_flow_by_id(self, id, realm='master'):
"""
@@ -1848,8 +2052,8 @@ class KeycloakAPI(object):
return open_url(flow_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s'
- % (id, realm, str(e)))
+ self.fail_open_url(e, msg='Could not delete authentication flow %s in realm %s: %s'
+ % (id, realm, str(e)))
def copy_auth_flow(self, config, realm='master'):
"""
@@ -1866,7 +2070,7 @@ class KeycloakAPI(object):
URL_AUTHENTICATION_FLOW_COPY.format(
url=self.baseurl,
realm=realm,
- copyfrom=quote(config["copyFrom"])),
+ copyfrom=quote(config["copyFrom"], safe='')),
method='POST',
http_agent=self.http_agent, headers=self.restheaders,
data=json.dumps(new_name),
@@ -1885,8 +2089,8 @@ class KeycloakAPI(object):
return flow
return None
except Exception as e:
- self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_open_url(e, msg='Could not copy authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
def create_empty_auth_flow(self, config, realm='master'):
"""
@@ -1925,8 +2129,8 @@ class KeycloakAPI(object):
return flow
return None
except Exception as e:
- self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_open_url(e, msg='Could not create empty authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
def update_authentication_executions(self, flowAlias, updatedExec, realm='master'):
""" Update authentication executions
@@ -1940,15 +2144,15 @@ class KeycloakAPI(object):
URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
url=self.baseurl,
realm=realm,
- flowalias=quote(flowAlias)),
+ flowalias=quote(flowAlias, safe='')),
method='PUT',
http_agent=self.http_agent, headers=self.restheaders,
data=json.dumps(updatedExec),
timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except HTTPError as e:
- self.module.fail_json(msg="Unable to update execution '%s': %s: %s %s" %
- (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
+ self.fail_open_url(e, msg="Unable to update execution '%s': %s: %s %s"
+ % (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
except Exception as e:
self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))
@@ -1971,7 +2175,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))
+ self.fail_open_url(e, msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))
def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'):
""" Create new sublow on the flow
@@ -1989,14 +2193,14 @@ class KeycloakAPI(object):
URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
url=self.baseurl,
realm=realm,
- flowalias=quote(flowAlias)),
+ flowalias=quote(flowAlias, safe='')),
method='POST',
http_agent=self.http_agent, headers=self.restheaders,
data=json.dumps(newSubFlow),
timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e)))
+ self.fail_open_url(e, msg="Unable to create new subflow %s: %s" % (subflowName, str(e)))
def create_execution(self, execution, flowAlias, realm='master'):
""" Create new execution on the flow
@@ -2013,15 +2217,15 @@ class KeycloakAPI(object):
URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format(
url=self.baseurl,
realm=realm,
- flowalias=quote(flowAlias)),
+ flowalias=quote(flowAlias, safe='')),
method='POST',
http_agent=self.http_agent, headers=self.restheaders,
data=json.dumps(newExec),
timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except HTTPError as e:
- self.module.fail_json(msg="Unable to create new execution '%s' %s: %s: %s %s" %
- (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
+ self.fail_open_url(e, msg="Unable to create new execution '%s' %s: %s: %s %s"
+ % (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
except Exception as e:
self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e)))
@@ -2057,7 +2261,7 @@ class KeycloakAPI(object):
timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e)))
+ self.fail_open_url(e, msg="Unable to change execution priority %s: %s" % (executionId, str(e)))
def get_executions_representation(self, config, realm='master'):
"""
@@ -2073,7 +2277,7 @@ class KeycloakAPI(object):
URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
url=self.baseurl,
realm=realm,
- flowalias=quote(config["alias"])),
+ flowalias=quote(config["alias"], safe='')),
method='GET',
http_agent=self.http_agent, headers=self.restheaders,
timeout=self.connection_timeout,
@@ -2094,8 +2298,121 @@ class KeycloakAPI(object):
execution["authenticationConfig"] = execConfig
return executions
except Exception as e:
- self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
+ self.fail_open_url(e, msg='Could not get executions for authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
+
+ def get_required_actions(self, realm='master'):
+ """
+ Get required actions.
+ :param realm: Realm name (not id).
+ :return: List of representations of the required actions.
+ """
+
+ try:
+ required_actions = json.load(
+ open_url(
+ URL_AUTHENTICATION_REQUIRED_ACTIONS.format(
+ url=self.baseurl,
+ realm=realm
+ ),
+ method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs
+ )
+ )
+
+ return required_actions
+ except Exception:
+ return None
+
+ def register_required_action(self, rep, realm='master'):
+ """
+ Register required action.
+ :param rep: JSON containing 'providerId', and 'name' attributes.
+ :param realm: Realm name (not id).
+ :return: Representation of the required action.
+ """
+
+ data = {
+ 'name': rep['name'],
+ 'providerId': rep['providerId']
+ }
+
+ try:
+ return open_url(
+ URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format(
+ url=self.baseurl,
+ realm=realm
+ ),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(data),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs
+ )
+ except Exception as e:
+ self.fail_open_url(
+ e,
+ msg='Unable to register required action %s in realm %s: %s'
+ % (rep["name"], realm, str(e))
+ )
+
+ def update_required_action(self, alias, rep, realm='master'):
+ """
+ Update required action.
+ :param alias: Alias of required action.
+ :param rep: JSON describing new state of required action.
+ :param realm: Realm name (not id).
+ :return: HTTPResponse object on success.
+ """
+
+ try:
+ return open_url(
+ URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format(
+ url=self.baseurl,
+ alias=quote(alias, safe=''),
+ realm=realm
+ ),
+ method='PUT',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(rep),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs
+ )
+ except Exception as e:
+ self.fail_open_url(
+ e,
+ msg='Unable to update required action %s in realm %s: %s'
+ % (alias, realm, str(e))
+ )
+
+ def delete_required_action(self, alias, realm='master'):
+ """
+ Delete required action.
+ :param alias: Alias of required action.
+ :param realm: Realm name (not id).
+ :return: HTTPResponse object on success.
+ """
+
+ try:
+ return open_url(
+ URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format(
+ url=self.baseurl,
+ alias=quote(alias, safe=''),
+ realm=realm
+ ),
+ method='DELETE',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs
+ )
+ except Exception as e:
+ self.fail_open_url(
+ e,
+ msg='Unable to delete required action %s in realm %s: %s'
+ % (alias, realm, str(e))
+ )
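# Editor's example of the representation these required-action helpers deal
# with: registration only posts 'name' and 'providerId', while update and
# delete address the action by its alias. Field values below are illustrative.
required_action = {
    "alias": "CONFIGURE_TOTP",       # used in the update/delete URL
    "providerId": "CONFIGURE_TOTP",  # sent on registration, together with name
    "name": "Configure OTP",
    "enabled": True,
    "defaultAction": False,
    "priority": 10,
    "config": {},
}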
def get_identity_providers(self, realm='master'):
""" Fetch representations for identity providers in a realm
@@ -2110,7 +2427,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain list of identity providers for realm %s: %s'
% (realm, str(e)))
def get_identity_provider(self, alias, realm='master'):
@@ -2127,7 +2444,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not fetch identity provider %s in realm %s: %s'
% (alias, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
@@ -2144,7 +2461,7 @@ class KeycloakAPI(object):
return open_url(idps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(idprep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create identity provider %s in realm %s: %s'
% (idprep['alias'], realm, str(e)))
def update_identity_provider(self, idprep, realm='master'):
@@ -2158,7 +2475,7 @@ class KeycloakAPI(object):
return open_url(idp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(idprep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update identity provider %s in realm %s: %s'
% (idprep['alias'], realm, str(e)))
def delete_identity_provider(self, alias, realm='master'):
@@ -2171,7 +2488,7 @@ class KeycloakAPI(object):
return open_url(idp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s'
+ self.fail_open_url(e, msg='Unable to delete identity provider %s in realm %s: %s'
% (alias, realm, str(e)))
def get_identity_provider_mappers(self, alias, realm='master'):
@@ -2189,7 +2506,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s'
% (alias, realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
% (alias, realm, str(e)))
def get_identity_provider_mapper(self, mid, alias, realm='master'):
@@ -2208,7 +2525,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
% (mid, alias, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
@@ -2226,7 +2543,7 @@ class KeycloakAPI(object):
return open_url(mappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(mapper), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
% (mapper['name'], alias, realm, str(e)))
def update_identity_provider_mapper(self, mapper, alias, realm='master'):
@@ -2241,7 +2558,7 @@ class KeycloakAPI(object):
return open_url(mapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(mapper), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update mapper %s for identity provider %s in realm %s: %s'
% (mapper['id'], alias, realm, str(e)))
def delete_identity_provider_mapper(self, mid, alias, realm='master'):
@@ -2255,7 +2572,7 @@ class KeycloakAPI(object):
return open_url(mapper_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
+ self.fail_open_url(e, msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
% (mid, alias, realm, str(e)))
def get_components(self, filter=None, realm='master'):
@@ -2275,7 +2592,7 @@ class KeycloakAPI(object):
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s'
% (realm, str(e)))
except Exception as e:
- self.module.fail_json(msg='Could not obtain list of components for realm %s: %s'
+ self.fail_open_url(e, msg='Could not obtain list of components for realm %s: %s'
% (realm, str(e)))
def get_component(self, cid, realm='master'):
@@ -2292,7 +2609,7 @@ class KeycloakAPI(object):
if e.code == 404:
return None
else:
- self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not fetch component %s in realm %s: %s'
% (cid, realm, str(e)))
except Exception as e:
self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
@@ -2315,7 +2632,7 @@ class KeycloakAPI(object):
return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs).read()))
except Exception as e:
- self.module.fail_json(msg='Could not create component in realm %s: %s'
+ self.fail_open_url(e, msg='Could not create component in realm %s: %s'
% (realm, str(e)))
def update_component(self, comprep, realm='master'):
@@ -2332,7 +2649,7 @@ class KeycloakAPI(object):
return open_url(comp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(comprep), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not update component %s in realm %s: %s'
+ self.fail_open_url(e, msg='Could not update component %s in realm %s: %s'
% (cid, realm, str(e)))
def delete_component(self, cid, realm='master'):
@@ -2345,12 +2662,12 @@ class KeycloakAPI(object):
return open_url(comp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
+ self.fail_open_url(e, msg='Unable to delete component %s in realm %s: %s'
% (cid, realm, str(e)))
def get_authz_authorization_scope_by_name(self, name, client_id, realm):
url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
- search_url = "%s/search?name=%s" % (url, quote(name))
+ search_url = "%s/search?name=%s" % (url, quote(name, safe=''))
try:
return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
@@ -2367,7 +2684,7 @@ class KeycloakAPI(object):
return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(payload), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+ self.fail_open_url(e, msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
def update_authz_authorization_scope(self, payload, id, client_id, realm):
"""Update an authorization scope for a Keycloak client"""
@@ -2377,7 +2694,7 @@ class KeycloakAPI(object):
return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
data=json.dumps(payload), validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not create update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+ self.fail_open_url(e, msg='Could not update authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
def remove_authz_authorization_scope(self, id, client_id, realm):
"""Remove an authorization scope from a Keycloak client"""
@@ -2387,4 +2704,355 @@ class KeycloakAPI(object):
return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
validate_certs=self.validate_certs)
except Exception as e:
- self.module.fail_json(msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+ self.fail_open_url(e, msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+
+ def get_user_by_id(self, user_id, realm='master'):
+ """
+ Get a User by its ID.
+ :param user_id: ID of the user.
+ :param realm: Realm
+ :return: Representation of the user.
+ """
+ try:
+ user_url = URL_USER.format(
+ url=self.baseurl,
+ realm=realm,
+ id=user_id)
+ userrep = json.load(
+ open_url(
+ user_url,
+ method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs))
+ return userrep
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not get user %s in realm %s: %s'
+ % (user_id, realm, str(e)))
+
+ def create_user(self, userrep, realm='master'):
+ """
+ Create a new User.
+ :param userrep: Representation of the user to create
+ :param realm: Realm
+ :return: Representation of the user created.
+ """
+ try:
+ if 'attributes' in userrep and isinstance(userrep['attributes'], list):
+ attributes = copy.deepcopy(userrep['attributes'])
+ userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes)
+ users_url = URL_USERS.format(
+ url=self.baseurl,
+ realm=realm)
+ open_url(users_url,
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(userrep),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ created_user = self.get_user_by_username(
+ username=userrep['username'],
+ realm=realm)
+ return created_user
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not create user %s in realm %s: %s'
+ % (userrep['username'], realm, str(e)))
+
+ def convert_user_attributes_to_keycloak_dict(self, attributes):
+ keycloak_user_attributes_dict = {}
+ for attribute in attributes:
+ if ('state' not in attribute or attribute['state'] == 'present') and 'name' in attribute:
+ keycloak_user_attributes_dict[attribute['name']] = attribute['values'] if 'values' in attribute else []
+ return keycloak_user_attributes_dict
+
+ def convert_keycloak_user_attributes_dict_to_module_list(self, attributes):
+ module_attributes_list = []
+ for key in attributes:
+ attr = {}
+ attr['name'] = key
+ attr['values'] = attributes[key]
+ module_attributes_list.append(attr)
+ return module_attributes_list
+
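# Editor's example of the attribute round-trip performed by the two helpers
# above (standalone restatement for illustration only).
def to_keycloak(attributes):
    return {a["name"]: a.get("values", [])
            for a in attributes
            if a.get("state", "present") == "present" and "name" in a}


def to_module(attributes):
    return [{"name": name, "values": values} for name, values in attributes.items()]


module_attrs = [{"name": "department", "values": ["it"]},
                {"name": "obsolete", "state": "absent"}]      # dropped on convert
kc_attrs = to_keycloak(module_attrs)                          # {'department': ['it']}
assert to_module(kc_attrs) == [{"name": "department", "values": ["it"]}]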
+ def update_user(self, userrep, realm='master'):
+ """
+ Update a User.
+ :param userrep: Representation of the user to update. This representation must include the ID of the user.
+ :param realm: Realm
+ :return: Representation of the updated user.
+ """
+ try:
+ if 'attributes' in userrep and isinstance(userrep['attributes'], list):
+ attributes = copy.deepcopy(userrep['attributes'])
+ userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes)
+ user_url = URL_USER.format(
+ url=self.baseurl,
+ realm=realm,
+ id=userrep["id"])
+ open_url(
+ user_url,
+ method='PUT',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(userrep),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ updated_user = self.get_user_by_id(
+ user_id=userrep['id'],
+ realm=realm)
+ return updated_user
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not update user %s in realm %s: %s'
+ % (userrep['username'], realm, str(e)))
+
+ def delete_user(self, user_id, realm='master'):
+ """
+ Delete a User.
+ :param user_id: ID of the user to be deleted
+ :param realm: Realm
+ :return: HTTP response.
+ """
+ try:
+ user_url = URL_USER.format(
+ url=self.baseurl,
+ realm=realm,
+ id=user_id)
+ return open_url(
+ user_url,
+ method='DELETE',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not delete user %s in realm %s: %s'
+ % (user_id, realm, str(e)))
+
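# Editor's example of a user representation as create_user/update_user accept
# it: 'attributes' may still be in the module's list format and is converted to
# Keycloak's dict form before the request is sent (values are illustrative).
userrep = {
    "username": "jdoe",
    "email": "jdoe@example.com",
    "enabled": True,
    "firstName": "John",
    "lastName": "Doe",
    "attributes": [
        {"name": "department", "values": ["it"]},
        {"name": "legacy_id", "values": ["42"], "state": "absent"},  # dropped
    ],
}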
+ def get_user_groups(self, user_id, realm='master'):
+ """
+ Get groups for a user.
+ :param user_id: User ID
+ :param realm: Realm
+ :return: List of the names of the groups the user belongs to.
+ """
+ try:
+ groups = []
+ user_groups_url = URL_USER_GROUPS.format(
+ url=self.baseurl,
+ realm=realm,
+ id=user_id)
+ user_groups = json.load(
+ open_url(
+ user_groups_url,
+ method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs))
+ for user_group in user_groups:
+ groups.append(user_group["name"])
+ return groups
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not get groups for user %s in realm %s: %s'
+ % (user_id, realm, str(e)))
+
+ def add_user_in_group(self, user_id, group_id, realm='master'):
+ """
+ Add a user to a group.
+ :param user_id: User ID
+ :param group_id: Group Id to add the user to.
+ :param realm: Realm
+ :return: HTTP Response
+ """
+ try:
+ user_group_url = URL_USER_GROUP.format(
+ url=self.baseurl,
+ realm=realm,
+ id=user_id,
+ group_id=group_id)
+ return open_url(
+ user_group_url,
+ method='PUT',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not add user %s in group %s in realm %s: %s'
+ % (user_id, group_id, realm, str(e)))
+
+ def remove_user_from_group(self, user_id, group_id, realm='master'):
+ """
+ Remove a user from a group.
+ :param user_id: User ID
+ :param group_id: Group Id to remove the user from.
+ :param realm: Realm
+ :return: HTTP response
+ """
+ try:
+ user_group_url = URL_USER_GROUP.format(
+ url=self.baseurl,
+ realm=realm,
+ id=user_id,
+ group_id=group_id)
+ return open_url(
+ user_group_url,
+ method='DELETE',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not remove user %s from group %s in realm %s: %s'
+ % (user_id, group_id, realm, str(e)))
+
+ def update_user_groups_membership(self, userrep, groups, realm='master'):
+ """
+ Update user's group membership
+ :param userrep: Representation of the user. This representation must include the ID.
+ :param groups: Desired group memberships, as a list of group names or dicts with an optional 'state'.
+ :param realm: Realm
+ :return: True if group membership has been changed, False otherwise.
+ """
+ changed = False
+ try:
+ user_existing_groups = self.get_user_groups(
+ user_id=userrep['id'],
+ realm=realm)
+ groups_to_add_and_remove = self.extract_groups_to_add_to_and_remove_from_user(groups)
+ # If group membership needs to be changed
+ if not is_struct_included(groups_to_add_and_remove['add'], user_existing_groups):
+ # Get available groups in the realm
+ realm_groups = self.get_groups(realm=realm)
+ for realm_group in realm_groups:
+ if "name" in realm_group and realm_group["name"] in groups_to_add_and_remove['add']:
+ self.add_user_in_group(
+ user_id=userrep["id"],
+ group_id=realm_group["id"],
+ realm=realm)
+ changed = True
+ elif "name" in realm_group and realm_group['name'] in groups_to_add_and_remove['remove']:
+ self.remove_user_from_group(
+ user_id=userrep['id'],
+ group_id=realm_group['id'],
+ realm=realm)
+ changed = True
+ return changed
+ except Exception as e:
+ self.module.fail_json(msg='Could not update group membership for user %s in realm %s: %s'
+ % (userrep['id'], realm, str(e)))
+
+ def extract_groups_to_add_to_and_remove_from_user(self, groups):
+ groups_extract = {}
+ groups_to_add = []
+ groups_to_remove = []
+ if isinstance(groups, list) and len(groups) > 0:
+ for group in groups:
+ group_name = group['name'] if isinstance(group, dict) and 'name' in group else group
+ if isinstance(group, dict) and ('state' not in group or group['state'] == 'present'):
+ groups_to_add.append(group_name)
+ else:
+ groups_to_remove.append(group_name)
+ groups_extract['add'] = groups_to_add
+ groups_extract['remove'] = groups_to_remove
+
+ return groups_extract
+
+ def convert_user_group_list_of_str_to_list_of_dict(self, groups):
+ list_of_groups = []
+ if isinstance(groups, list) and len(groups) > 0:
+ for group in groups:
+ if isinstance(group, str):
+ group_dict = {}
+ group_dict['name'] = group
+ list_of_groups.append(group_dict)
+ return list_of_groups
+
+ def create_authz_custom_policy(self, policy_type, payload, client_id, realm):
+ """Create a custom policy for a Keycloak client"""
+ url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm)
+
+ try:
+ return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(payload), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+
+ def remove_authz_custom_policy(self, policy_id, client_id, realm):
+ """Remove a custom policy from a Keycloak client"""
+ url = URL_AUTHZ_CUSTOM_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm)
+ delete_url = "%s/%s" % (url, policy_id)
+
+ try:
+ return open_url(delete_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not delete custom policy %s for client %s in realm %s: %s' % (policy_id, client_id, realm, str(e)))
+
+ def get_authz_permission_by_name(self, name, client_id, realm):
+ """Get authorization permission by name"""
+ url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm)
+ search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20'))
+
+ try:
+ return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception:
+ return False
+
+ def create_authz_permission(self, payload, permission_type, client_id, realm):
+ """Create an authorization permission for a Keycloak client"""
+ url = URL_AUTHZ_PERMISSIONS.format(url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm)
+
+ try:
+ return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(payload), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+
+ def remove_authz_permission(self, id, client_id, realm):
+ """Create an authorization permission for a Keycloak client"""
+ url = URL_AUTHZ_POLICY.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
+
+ try:
+ return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not delete permission %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
+
+ def update_authz_permission(self, payload, permission_type, id, client_id, realm):
+ """Update a permission for a Keycloak client"""
+ url = URL_AUTHZ_PERMISSION.format(url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm)
+
+ try:
+ return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(payload), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.fail_open_url(e, msg='Could not update permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+
+ def get_authz_resource_by_name(self, name, client_id, realm):
+ """Get authorization resource by name"""
+ url = URL_AUTHZ_RESOURCES.format(url=self.baseurl, client_id=client_id, realm=realm)
+ search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20'))
+
+ try:
+ return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception:
+ return False
+
+ def get_authz_policy_by_name(self, name, client_id, realm):
+ """Get authorization policy by name"""
+ url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm)
+ search_url = "%s/search?name=%s&permission=false" % (url, name.replace(' ', '%20'))
+
+ try:
+ return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception:
+ return False
+
+ def fail_open_url(self, e, msg, **kwargs):
+ try:
+ if isinstance(e, HTTPError):
+ msg = "%s: %s" % (msg, to_native(e.read()))
+ except Exception:
+ pass
+ self.module.fail_json(msg, **kwargs)
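The group-membership helpers above expect groups as a list of dictionaries carrying a name and an optional state; note that plain strings fall into the removal branch of extract_groups_to_add_to_and_remove_from_user() unless they are first normalised with convert_user_group_list_of_str_to_list_of_dict(). A minimal, hypothetical usage sketch (kc is an already-initialised Keycloak API helper and user_id is assumed to exist):

# Hypothetical usage sketch; not part of the module_utils above.
groups = [
    {"name": "developers"},                    # no 'state' -> treated as an addition
    {"name": "admins", "state": "present"},    # explicit addition
    {"name": "guests", "state": "absent"},     # removal
]
# Plain strings can be normalised into the dict form first:
groups += kc.convert_user_group_list_of_str_to_list_of_dict(["auditors"])
changed = kc.update_user_groups_membership(
    userrep={"id": user_id, "username": "jdoe"},
    groups=groups,
    realm="master",
)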
diff --git a/ansible_collections/community/general/plugins/module_utils/ldap.py b/ansible_collections/community/general/plugins/module_utils/ldap.py
index 655371321..fccf07304 100644
--- a/ansible_collections/community/general/plugins/module_utils/ldap.py
+++ b/ansible_collections/community/general/plugins/module_utils/ldap.py
@@ -42,11 +42,17 @@ def gen_specs(**specs):
'validate_certs': dict(default=True, type='bool'),
'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'),
+ 'client_cert': dict(default=None, type='path'),
+ 'client_key': dict(default=None, type='path'),
})
return specs
+def ldap_required_together():
+ return [['client_cert', 'client_key']]
+
+
class LdapGeneric(object):
def __init__(self, module):
# Shortcuts
@@ -60,6 +66,8 @@ class LdapGeneric(object):
self.verify_cert = self.module.params['validate_certs']
self.sasl_class = self.module.params['sasl_class']
self.xorder_discovery = self.module.params['xorder_discovery']
+ self.client_cert = self.module.params['client_cert']
+ self.client_key = self.module.params['client_key']
# Establish connection
self.connection = self._connect_to_ldap()
@@ -102,6 +110,10 @@ class LdapGeneric(object):
if self.ca_path:
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca_path)
+ if self.client_cert and self.client_key:
+ ldap.set_option(ldap.OPT_X_TLS_CERTFILE, self.client_cert)
+ ldap.set_option(ldap.OPT_X_TLS_KEYFILE, self.client_key)
+
connection = ldap.initialize(self.server_uri)
if self.referrals_chasing == 'disabled':
@@ -127,5 +139,7 @@ class LdapGeneric(object):
def _xorder_dn(self):
# match X_ORDERed DNs
- regex = r"\w+=\{\d+\}.+"
- return re.match(regex, self.module.params['dn']) is not None
+ regex = r".+\{\d+\}.+"
+ explode_dn = ldap.dn.explode_dn(self.module.params['dn'])
+
+ return re.match(regex, explode_dn[0]) is not None
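The two new options are wired into a module through gen_specs() together with the new ldap_required_together() helper; a rough sketch of the expected wiring (the module-specific option shown is purely illustrative, and the rest of the module body is assumed):

# Sketch only; real modules add their own options to gen_specs().
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ldap import (
    LdapGeneric, gen_specs, ldap_required_together)

module = AnsibleModule(
    argument_spec=gen_specs(
        state=dict(default='present', choices=['present', 'absent']),  # illustrative module option
    ),
    required_together=ldap_required_together(),  # client_cert and client_key must be set together
)
ldap_obj = LdapGeneric(module)  # picks up client_cert/client_key and sets the TLS options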
diff --git a/ansible_collections/community/general/plugins/module_utils/locale_gen.py b/ansible_collections/community/general/plugins/module_utils/locale_gen.py
new file mode 100644
index 000000000..ca35e17d3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/locale_gen.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+def locale_runner(module):
+ runner = CmdRunner(
+ module,
+ command=["locale", "-a"],
+ check_rc=True,
+ )
+ return runner
+
+
+def locale_gen_runner(module):
+ runner = CmdRunner(
+ module,
+ command="locale-gen",
+ arg_formats=dict(
+ name=cmd_runner_fmt.as_list(),
+ purge=cmd_runner_fmt.as_fixed('--purge'),
+ ),
+ check_rc=True,
+ )
+ return runner
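locale_gen_runner() builds a CmdRunner whose command line is rendered from the arg_formats above; a rough usage sketch (the locale value is illustrative, and the as_fixed '--purge' flag needs no value when listed in the argument order):

# Illustrative sketch of driving the runner; 'module' is an AnsibleModule instance.
runner = locale_gen_runner(module)
with runner("name") as ctx:
    ctx.run(name="de_DE.UTF-8")        # runs: locale-gen de_DE.UTF-8
with runner("purge name") as ctx:
    ctx.run(name="de_DE.UTF-8")        # runs: locale-gen --purge de_DE.UTF-8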
diff --git a/ansible_collections/community/general/plugins/module_utils/lxd.py b/ansible_collections/community/general/plugins/module_utils/lxd.py
index 7f5362532..68a1c690f 100644
--- a/ansible_collections/community/general/plugins/module_utils/lxd.py
+++ b/ansible_collections/community/general/plugins/module_utils/lxd.py
@@ -41,7 +41,7 @@ class LXDClientException(Exception):
class LXDClient(object):
- def __init__(self, url, key_file=None, cert_file=None, debug=False):
+ def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True):
"""LXD Client.
:param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
@@ -52,6 +52,10 @@ class LXDClient(object):
:type cert_file: ``str``
:param debug: The debug flag. The request and response are stored in logs when debug is true.
:type debug: ``bool``
+ :param server_cert_file: The path of the server certificate file.
+ :type server_cert_file: ``str``
+ :param server_check_hostname: Whether to check the server's hostname as part of TLS verification.
+ :type server_check_hostname: ``bool``
"""
self.url = url
self.debug = debug
@@ -61,6 +65,10 @@ class LXDClient(object):
self.key_file = key_file
parts = generic_urlparse(urlparse(self.url))
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
+ if server_cert_file:
+ # Check that the received cert is signed by the provided server_cert_file
+ ctx.load_verify_locations(cafile=server_cert_file)
+ ctx.check_hostname = server_check_hostname
ctx.load_cert_chain(cert_file, keyfile=key_file)
self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
elif url.startswith('unix:'):
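With the new parameters a caller can pin the expected server certificate and optionally skip hostname checking; a hypothetical construction (all file paths are placeholders):

# Hypothetical sketch; file paths are placeholders.
client = LXDClient(
    url='https://127.0.0.1:8443',
    key_file='/etc/lxd/client.key',
    cert_file='/etc/lxd/client.crt',
    server_cert_file='/etc/lxd/server.crt',   # received cert must chain to this certificate
    server_check_hostname=False,              # e.g. when connecting to a bare IP address
)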
diff --git a/ansible_collections/community/general/plugins/module_utils/memset.py b/ansible_collections/community/general/plugins/module_utils/memset.py
index 374b40ff4..8ddf76907 100644
--- a/ansible_collections/community/general/plugins/module_utils/memset.py
+++ b/ansible_collections/community/general/plugins/module_utils/memset.py
@@ -14,8 +14,9 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import open_url, urllib_error
+from ansible.module_utils.urls import open_url
from ansible.module_utils.basic import json
+import ansible.module_utils.six.moves.urllib.error as urllib_error
class Response(object):
@@ -78,7 +79,7 @@ def memset_api_call(api_key, api_method, payload=None):
msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error'])
except urllib_error.URLError as e:
has_failed = True
- msg = "An URLError occured ({0})." . format(type(e))
+ msg = "An URLError occurred ({0})." . format(type(e))
response.stderr = "{0}" . format(e)
if msg is None:
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py
deleted file mode 100644
index a7d379394..000000000
--- a/ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky <russoz@gmail.com>
-# Copyright (c) 2020, Ansible Project
-# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
-# SPDX-License-Identifier: BSD-2-Clause
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from functools import partial
-
-
-class ArgFormat(object):
- """
- Argument formatter for use as a command line parameter. Used in CmdMixin.
- """
- BOOLEAN = 0
- PRINTF = 1
- FORMAT = 2
- BOOLEAN_NOT = 3
-
- @staticmethod
- def stars_deco(num):
- if num == 1:
- def deco(f):
- return lambda v: f(*v)
- return deco
- elif num == 2:
- def deco(f):
- return lambda v: f(**v)
- return deco
-
- return lambda f: f
-
- def __init__(self, name, fmt=None, style=FORMAT, stars=0):
- """
- THIS CLASS IS BEING DEPRECATED.
- It was never meant to be used outside the scope of CmdMixin, and CmdMixin is being deprecated.
- See the deprecation notice in ``CmdMixin.__init__()`` below.
-
- Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
- the CLI command execution.
- :param name: Name of the argument to be formatted
- :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that
- :param style: Whether arg_format (as str) should use printf-style formatting.
- Ignored if arg_format is None or not a str (should be callable).
- :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value
- """
- def printf_fmt(_fmt, v):
- try:
- return [_fmt % v]
- except TypeError as e:
- if e.args[0] != 'not all arguments converted during string formatting':
- raise
- return [_fmt]
-
- _fmts = {
- ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
- ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]),
- ArgFormat.PRINTF: printf_fmt,
- ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
- }
-
- self.name = name
- self.stars = stars
- self.style = style
-
- if fmt is None:
- fmt = "{0}"
- style = ArgFormat.FORMAT
-
- if isinstance(fmt, str):
- func = _fmts[style]
- self.arg_format = partial(func, fmt)
- elif isinstance(fmt, list) or isinstance(fmt, tuple):
- self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
- elif hasattr(fmt, '__call__'):
- self.arg_format = fmt
- else:
- raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
- 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))
-
- if stars:
- self.arg_format = (self.stars_deco(stars))(self.arg_format)
-
- def to_text(self, value):
- if value is None and self.style != ArgFormat.BOOLEAN_NOT:
- return []
- func = self.arg_format
- return [str(p) for p in func(value)]
-
-
-class CmdMixin(object):
- """
- THIS CLASS IS BEING DEPRECATED.
- See the deprecation notice in ``CmdMixin.__init__()`` below.
-
- Mixin for mapping module options to running a CLI command with its arguments.
- """
- command = None
- command_args_formats = {}
- run_command_fixed_options = {}
- check_rc = False
- force_lang = "C"
-
- @property
- def module_formats(self):
- result = {}
- for param in self.module.params.keys():
- result[param] = ArgFormat(param)
- return result
-
- @property
- def custom_formats(self):
- result = {}
- for param, fmt_spec in self.command_args_formats.items():
- result[param] = ArgFormat(param, **fmt_spec)
- return result
-
- def __init__(self, *args, **kwargs):
- super(CmdMixin, self).__init__(*args, **kwargs)
- self.module.deprecate(
- 'The CmdMixin used in classes CmdModuleHelper and CmdStateModuleHelper is being deprecated. '
- 'Modules should use community.general.plugins.module_utils.cmd_runner.CmdRunner instead.',
- version='8.0.0',
- collection_name='community.general',
- )
-
- def _calculate_args(self, extra_params=None, params=None):
- def add_arg_formatted_param(_cmd_args, arg_format, _value):
- args = list(arg_format.to_text(_value))
- return _cmd_args + args
-
- def find_format(_param):
- return self.custom_formats.get(_param, self.module_formats.get(_param))
-
- extra_params = extra_params or dict()
- cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
- try:
- cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
- except ValueError:
- pass
- param_list = params if params else self.vars.keys()
-
- for param in param_list:
- if isinstance(param, dict):
- if len(param) != 1:
- self.do_raise("run_command parameter as a dict must contain only one key: {0}".format(param))
- _param = list(param.keys())[0]
- fmt = find_format(_param)
- value = param[_param]
- elif isinstance(param, str):
- if param in self.vars.keys():
- fmt = find_format(param)
- value = self.vars[param]
- elif param in extra_params:
- fmt = find_format(param)
- value = extra_params[param]
- else:
- self.do_raise('Cannot determine value for parameter: {0}'.format(param))
- else:
- self.do_raise("run_command parameter must be either a str or a dict: {0}".format(param))
- cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
-
- return cmd_args
-
- def process_command_output(self, rc, out, err):
- return rc, out, err
-
- def run_command(self,
- extra_params=None,
- params=None,
- process_output=None,
- publish_rc=True,
- publish_out=True,
- publish_err=True,
- publish_cmd=True,
- *args, **kwargs):
- cmd_args = self._calculate_args(extra_params, params)
- options = dict(self.run_command_fixed_options)
- options['check_rc'] = options.get('check_rc', self.check_rc)
- options.update(kwargs)
- env_update = dict(options.get('environ_update', {}))
- if self.force_lang:
- env_update.update({
- 'LANGUAGE': self.force_lang,
- 'LC_ALL': self.force_lang,
- })
- self.update_output(force_lang=self.force_lang)
- options['environ_update'] = env_update
- rc, out, err = self.module.run_command(cmd_args, *args, **options)
- if publish_rc:
- self.update_output(rc=rc)
- if publish_out:
- self.update_output(stdout=out)
- if publish_err:
- self.update_output(stderr=err)
- if publish_cmd:
- self.update_output(cmd_args=cmd_args)
- if process_output is None:
- _process = self.process_command_output
- else:
- _process = process_output
-
- return _process(rc, out, err)
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py
index bab8c090b..772df8c0e 100644
--- a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py
+++ b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py
@@ -38,6 +38,12 @@ class DependencyCtxMgr(object):
class DependencyMixin(ModuleHelperBase):
+ """
+ THIS CLASS IS BEING DEPRECATED.
+ See the deprecation notice in ``DependencyMixin.fail_on_missing_deps()`` below.
+
+ Mixin for declaring and checking external dependencies required by a module.
+ """
_dependencies = []
@classmethod
@@ -46,6 +52,14 @@ class DependencyMixin(ModuleHelperBase):
return cls._dependencies[-1]
def fail_on_missing_deps(self):
+ if not self._dependencies:
+ return
+ self.module.deprecate(
+ 'The DependencyMixin is being deprecated. '
+ 'Modules should use community.general.plugins.module_utils.deps instead.',
+ version='9.0.0',
+ collection_name='community.general',
+ )
for d in self._dependencies:
if not d.has_it:
self.module.fail_json(changed=False,
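The deprecation message points modules at the community.general deps module_utils; a rough sketch of that replacement pattern (helper names should be checked against module_utils/deps.py, and paramiko is just an example dependency):

# Sketch of the suggested replacement, assuming deps.declare()/deps.validate() as in module_utils/deps.py.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils import deps

with deps.declare("paramiko"):
    import paramiko

module = AnsibleModule(argument_spec=dict())
deps.validate(module)   # fails the module with a helpful message if the import above failed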
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py
index 6dfb29bab..91f4e4a18 100644
--- a/ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py
+++ b/ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py
@@ -11,6 +11,13 @@ import copy
class VarMeta(object):
+ """
+ DEPRECATION WARNING
+
+ This class is deprecated and will be removed in community.general 10.0.0.
+ Modules should use the VarDict from plugins/module_utils/vardict.py instead.
+ """
+
NOTHING = object()
def __init__(self, diff=False, output=True, change=None, fact=False):
@@ -60,6 +67,12 @@ class VarMeta(object):
class VarDict(object):
+ """
+ DEPRECATION WARNING
+
+ This class is deprecated and will be removed in community.general 10.0.0.
+ Modules should use the VarDict from plugins/module_utils/vardict.py instead.
+ """
def __init__(self):
self._data = dict()
self._meta = dict()
@@ -123,7 +136,12 @@ class VarDict(object):
class VarsMixin(object):
+ """
+ DEPRECATION WARNING
+ This class is deprecated and will be removed in community.general 10.0.0.
+ Modules should use the VarDict from plugins/module_utils/vardict.py instead.
+ """
def __init__(self, module=None):
self.vars = VarDict()
super(VarsMixin, self).__init__(module)
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py b/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py
index c5973262d..c33efb16b 100644
--- a/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py
+++ b/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py
@@ -7,11 +7,11 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
+
from ansible.module_utils.common.dict_transformations import dict_merge
# (TODO: remove AnsibleModule!) pylint: disable-next=unused-import
from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule # noqa: F401
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin
@@ -65,19 +65,3 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper
class StateModuleHelper(StateMixin, ModuleHelper):
pass
-
-
-class CmdModuleHelper(CmdMixin, ModuleHelper):
- """
- THIS CLASS IS BEING DEPRECATED.
- See the deprecation notice in ``CmdMixin.__init__()``.
- """
- pass
-
-
-class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper):
- """
- THIS CLASS IS BEING DEPRECATED.
- See the deprecation notice in ``CmdMixin.__init__()``.
- """
- pass
diff --git a/ansible_collections/community/general/plugins/module_utils/module_helper.py b/ansible_collections/community/general/plugins/module_utils/module_helper.py
index 8a51de665..5aa16c057 100644
--- a/ansible_collections/community/general/plugins/module_utils/module_helper.py
+++ b/ansible_collections/community/general/plugins/module_utils/module_helper.py
@@ -7,14 +7,16 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
+# pylint: disable=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( # noqa: F401, pylint: disable=unused-import
- ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
+ ModuleHelper, StateModuleHelper, AnsibleModule
+)
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr, DependencyMixin # noqa: F401
+from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401
+from ansible_collections.community.general.plugins.module_utils.mh.deco import (
+ cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns,
)
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat # noqa: F401, pylint: disable=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401, pylint: disable=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401, pylint: disable=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401, pylint: disable=unused-import
-# pylint: disable-next=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception # noqa: F401
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict, VarsMixin # noqa: F401
diff --git a/ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py b/ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py
index cd2abc568..cc6db257d 100644
--- a/ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py
+++ b/ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py
@@ -79,7 +79,7 @@ def _post_pritunl_organization(
api_secret=api_secret,
base_url=base_url,
method="POST",
- path="/organization/%s",
+ path="/organization",
headers={"Content-Type": "application/json"},
data=json.dumps(organization_data),
validate_certs=validate_certs,
@@ -220,7 +220,7 @@ def post_pritunl_organization(
api_secret=api_secret,
base_url=base_url,
organization_data={"name": organization_name},
- validate_certs=True,
+ validate_certs=validate_certs,
)
if response.getcode() != 200:
@@ -248,7 +248,7 @@ def post_pritunl_user(
base_url=base_url,
organization_id=organization_id,
user_data=user_data,
- validate_certs=True,
+ validate_certs=validate_certs,
)
if response.getcode() != 200:
@@ -267,7 +267,7 @@ def post_pritunl_user(
organization_id=organization_id,
user_data=user_data,
user_id=user_id,
- validate_certs=True,
+ validate_certs=validate_certs,
)
if response.getcode() != 200:
@@ -287,7 +287,7 @@ def delete_pritunl_organization(
api_secret=api_secret,
base_url=base_url,
organization_id=organization_id,
- validate_certs=True,
+ validate_certs=validate_certs,
)
if response.getcode() != 200:
@@ -307,7 +307,7 @@ def delete_pritunl_user(
base_url=base_url,
organization_id=organization_id,
user_id=user_id,
- validate_certs=True,
+ validate_certs=validate_certs,
)
if response.getcode() != 200:
@@ -331,7 +331,7 @@ def pritunl_auth_request(
):
"""
Send an API call to a Pritunl server.
- Taken from https://pritunl.com/api and adaped work with Ansible open_url
+ Taken from https://pritunl.com/api and adapted to work with Ansible open_url
"""
auth_timestamp = str(int(time.time()))
auth_nonce = uuid.uuid4().hex
diff --git a/ansible_collections/community/general/plugins/module_utils/ocapi_utils.py b/ansible_collections/community/general/plugins/module_utils/ocapi_utils.py
index acc2ceae4..232c91506 100644
--- a/ansible_collections/community/general/plugins/module_utils/ocapi_utils.py
+++ b/ansible_collections/community/general/plugins/module_utils/ocapi_utils.py
@@ -432,7 +432,7 @@ class OcapiUtils(object):
else:
return response
details = response["data"]["Status"].get("Details")
- if type(details) is str:
+ if isinstance(details, str):
details = [details]
health_list = response["data"]["Status"]["Health"]
return_value = {
diff --git a/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py b/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
index 3d9c20f2a..392692e7d 100644
--- a/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
+++ b/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
@@ -434,7 +434,7 @@ def check_and_update_attributes(
target_instance, attr_name, input_value, existing_value, changed
):
"""
- This function checks the difference between two resource attributes of literal types and sets the attrbute
+ This function checks the difference between two resource attributes of literal types and sets the attribute
value in the target instance type holding the attribute.
:param target_instance: The instance which contains the attribute whose values to be compared
:param attr_name: Name of the attribute whose value required to be compared
@@ -561,7 +561,7 @@ def are_lists_equal(s, t):
if s is None and t is None:
return True
- if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)):
+ if s is None or t is None or (len(s) != len(t)):
return False
if len(s) == 0:
@@ -570,7 +570,7 @@ def are_lists_equal(s, t):
s = to_dict(s)
t = to_dict(t)
- if type(s[0]) == dict:
+ if isinstance(s[0], dict):
# Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on
# service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key
# `service_name` which is not provided in the list of `services` by a user while making an update call; only
@@ -604,9 +604,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
user_provided_attr_value = module.params.get(attr, None)
unequal_list_attr = (
- type(resources_attr_value) == list or type(user_provided_attr_value) == list
+ isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list)
) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
- unequal_attr = type(resources_attr_value) != list and to_dict(
+ unequal_attr = not isinstance(resources_attr_value, list) and to_dict(
resources_attr_value
) != to_dict(user_provided_attr_value)
if unequal_list_attr or unequal_attr:
@@ -785,7 +785,7 @@ def _get_attributes_to_consider(exclude_attributes, model, module):
attributes_to_consider = list(model.attribute_map)
if "freeform_tags" in attributes_to_consider:
attributes_to_consider.remove("freeform_tags")
- # Temporarily removing node_count as the exisiting resource does not reflect it
+ # Temporarily removing node_count as the existing resource does not reflect it
if "node_count" in attributes_to_consider:
attributes_to_consider.remove("node_count")
_debug("attributes to consider: {0}".format(attributes_to_consider))
@@ -936,9 +936,9 @@ def tuplize(d):
list_of_tuples = []
key_list = sorted(list(d.keys()))
for key in key_list:
- if type(d[key]) == list:
+ if isinstance(d[key], list):
# Convert a value which is itself a list of dict to a list of tuples.
- if d[key] and type(d[key][0]) == dict:
+ if d[key] and isinstance(d[key][0], dict):
sub_tuples = []
for sub_dict in d[key]:
sub_tuples.append(tuplize(sub_dict))
@@ -948,7 +948,7 @@ def tuplize(d):
list_of_tuples.append((sub_tuples is None, key, sub_tuples))
else:
list_of_tuples.append((d[key] is None, key, d[key]))
- elif type(d[key]) == dict:
+ elif isinstance(d[key], dict):
tupled_value = tuplize(d[key])
list_of_tuples.append((tupled_value is None, key, tupled_value))
else:
@@ -969,13 +969,13 @@ def sort_dictionary(d):
"""
sorted_d = {}
for key in d:
- if type(d[key]) == list:
- if d[key] and type(d[key][0]) == dict:
+ if isinstance(d[key], list):
+ if d[key] and isinstance(d[key][0], dict):
sorted_value = sort_list_of_dictionary(d[key])
sorted_d[key] = sorted_value
else:
sorted_d[key] = sorted(d[key])
- elif type(d[key]) == dict:
+ elif isinstance(d[key], dict):
sorted_d[key] = sort_dictionary(d[key])
else:
sorted_d[key] = d[key]
@@ -1026,10 +1026,7 @@ def check_if_user_value_matches_resources_attr(
return
if (
- resources_value_for_attr is None
- and len(user_provided_value_for_attr) >= 0
- or user_provided_value_for_attr is None
- and len(resources_value_for_attr) >= 0
+ resources_value_for_attr is None or user_provided_value_for_attr is None
):
res[0] = False
return
@@ -1044,7 +1041,7 @@ def check_if_user_value_matches_resources_attr(
if (
user_provided_value_for_attr
- and type(user_provided_value_for_attr[0]) == dict
+ and isinstance(user_provided_value_for_attr[0], dict)
):
# Process a list of dict
sorted_user_provided_value_for_attr = sort_list_of_dictionary(
@@ -1532,7 +1529,7 @@ def delete_and_wait(
result[resource_type] = resource
return result
# oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found.
- if type(wait_response) is not Sentinel:
+ if not isinstance(wait_response, Sentinel):
resource = to_dict(wait_response.data)
else:
resource["lifecycle_state"] = "DELETED"
@@ -1547,7 +1544,7 @@ def delete_and_wait(
except ServiceError as ex:
# DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
# resource is not available, instead of the expected 404. So working around this for now.
- if type(client) == oci.dns.DnsClient:
+ if isinstance(client, oci.dns.DnsClient):
if ex.status == 400 and ex.code == "InvalidParameter":
_debug(
"Resource {0} with {1} already deleted. So returning changed=False".format(
@@ -1774,7 +1771,7 @@ def update_class_type_attr_difference(
):
"""
Checks the difference and updates an attribute which is represented by a class
- instance. Not aplicable if the attribute type is a primitive value.
+ instance. Not applicable if the attribute type is a primitive value.
For example, if a class name is A with an attribute x, then if A.x = X(), then only
this method works.
:param update_class_details The instance which should be updated if there is change in
@@ -1936,7 +1933,7 @@ def get_target_resource_from_list(
module, list_resource_fn, target_resource_id=None, **kwargs
):
"""
- Returns a resource filtered by identifer from a list of resources. This method should be
+ Returns a resource filtered by identifier from a list of resources. This method should be
used as an alternative to the 'get resource' method when 'get resource' is not provided by
resource api. This method returns a wrapper of response object but that should not be
used as an input to 'wait_until' utility as this is only a partial wrapper of response object.
diff --git a/ansible_collections/community/general/plugins/module_utils/pipx.py b/ansible_collections/community/general/plugins/module_utils/pipx.py
index 2f19f352d..a385ec93e 100644
--- a/ansible_collections/community/general/plugins/module_utils/pipx.py
+++ b/ansible_collections/community/general/plugins/module_utils/pipx.py
@@ -42,7 +42,7 @@ def pipx_runner(module, command, **kwargs):
system_site_packages=fmt.as_bool("--system-site-packages"),
_list=fmt.as_fixed(['list', '--include-injected', '--json']),
editable=fmt.as_bool("--editable"),
- pip_args=fmt.as_opt_val('--pip-args'),
+ pip_args=fmt.as_opt_eq_val('--pip-args'),
),
environ_update={'USE_EMOJI': '0'},
check_rc=True,
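The switch from as_opt_val to as_opt_eq_val changes how the value is rendered on the command line; roughly as follows (assuming the formatters are plain callables taking the value, as described in the CmdRunner guide):

# Illustrative only; output shapes follow the cmd_runner formatter conventions.
from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt

cmd_runner_fmt.as_opt_val('--pip-args')('--no-cache-dir')
# -> ['--pip-args', '--no-cache-dir']   (the value can be mistaken for a separate option)
cmd_runner_fmt.as_opt_eq_val('--pip-args')('--no-cache-dir')
# -> ['--pip-args=--no-cache-dir']      (the value stays attached to --pip-args)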
diff --git a/ansible_collections/community/general/plugins/module_utils/proxmox.py b/ansible_collections/community/general/plugins/module_utils/proxmox.py
index 58287cec1..5fd783d65 100644
--- a/ansible_collections/community/general/plugins/module_utils/proxmox.py
+++ b/ansible_collections/community/general/plugins/module_utils/proxmox.py
@@ -7,17 +7,12 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-# (TODO: remove next line!)
-import atexit # noqa: F401, pylint: disable=unused-import
-# (TODO: remove next line!)
-import time # noqa: F401, pylint: disable=unused-import
-# (TODO: remove next line!)
-import re # noqa: F401, pylint: disable=unused-import
import traceback
PROXMOXER_IMP_ERR = None
try:
from proxmoxer import ProxmoxAPI
+ from proxmoxer import __version__ as proxmoxer_version
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
@@ -25,8 +20,6 @@ except ImportError:
from ansible.module_utils.basic import env_fallback, missing_required_lib
-# (TODO: remove next line!)
-from ansible.module_utils.common.text.converters import to_native # noqa: F401, pylint: disable=unused-import
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
@@ -79,6 +72,7 @@ class ProxmoxAnsible(object):
module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
self.module = module
+ self.proxmoxer_version = proxmoxer_version
self.proxmox_api = self._connect()
# Test token validity
try:
@@ -98,6 +92,8 @@ class ProxmoxAnsible(object):
if api_password:
auth_args['password'] = api_password
else:
+ if self.proxmoxer_version < LooseVersion('1.1.0'):
+ self.module.fail_json('Using "token_name" and "token_value" requires proxmoxer>=1.1.0')
auth_args['token_name'] = api_token_id
auth_args['token_value'] = api_token_secret
@@ -107,19 +103,30 @@ class ProxmoxAnsible(object):
self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
def version(self):
- apireturn = self.proxmox_api.version.get()
- return LooseVersion(apireturn['version'])
+ try:
+ apiversion = self.proxmox_api.version.get()
+ return LooseVersion(apiversion['version'])
+ except Exception as e:
+ self.module.fail_json(msg='Unable to retrieve Proxmox VE version: %s' % e)
def get_node(self, node):
- nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
+ try:
+ nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
+ except Exception as e:
+ self.module.fail_json(msg='Unable to retrieve Proxmox VE node: %s' % e)
return nodes[0] if nodes else None
def get_nextvmid(self):
- vmid = self.proxmox_api.cluster.nextid.get()
- return vmid
+ try:
+ return self.proxmox_api.cluster.nextid.get()
+ except Exception as e:
+ self.module.fail_json(msg='Unable to retrieve next free vmid: %s' % e)
def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False):
- vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
+ try:
+ vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
+ except Exception as e:
+ self.module.fail_json(msg='Unable to retrieve list of VMs filtered by name %s: %s' % (name, e))
if not vms:
if ignore_missing:
@@ -132,7 +139,10 @@ class ProxmoxAnsible(object):
return vms[0]
def get_vm(self, vmid, ignore_missing=False):
- vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+ try:
+ vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+ except Exception as e:
+ self.module.fail_json(msg='Unable to retrieve list of VMs filtered by vmid %s: %s' % (vmid, e))
if vms:
return vms[0]
@@ -143,5 +153,44 @@ class ProxmoxAnsible(object):
self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
def api_task_ok(self, node, taskid):
- status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
- return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
+ try:
+ status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
+ return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
+ except Exception as e:
+ self.module.fail_json(msg='Unable to retrieve status of API task from node %s: %s' % (node, e))
+
+ def get_pool(self, poolid):
+ """Retrieve pool information
+
+ :param poolid: str - name of the pool
+ :return: dict - pool information
+ """
+ try:
+ return self.proxmox_api.pools(poolid).get()
+ except Exception as e:
+ self.module.fail_json(msg="Unable to retrieve pool %s information: %s" % (poolid, e))
+
+ def get_storages(self, type):
+ """Retrieve storages information
+
+ :param type: str, optional - type of storages
+ :return: list of dicts - array of storages
+ """
+ try:
+ return self.proxmox_api.storage.get(type=type)
+ except Exception as e:
+ self.module.fail_json(msg="Unable to retrieve storages information with type %s: %s" % (type, e))
+
+ def get_storage_content(self, node, storage, content=None, vmid=None):
+ try:
+ return (
+ self.proxmox_api.nodes(node)
+ .storage(storage)
+ .content()
+ .get(content=content, vmid=vmid)
+ )
+ except Exception as e:
+ self.module.fail_json(
+ msg="Unable to list content on %s, %s for %s and %s: %s"
+ % (node, storage, content, vmid, e)
+ )
diff --git a/ansible_collections/community/general/plugins/module_utils/redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
index 9b6470302..4c2057129 100644
--- a/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
+++ b/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
@@ -7,12 +7,21 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
+import os
+import random
+import string
+import gzip
+from io import BytesIO
from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import http_client
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
@@ -123,8 +132,10 @@ class RedfishUtils(object):
return resp
# The following functions are to send GET/POST/PATCH/DELETE requests
- def get_request(self, uri):
+ def get_request(self, uri, override_headers=None, allow_no_resp=False):
req_headers = dict(GET_HEADERS)
+ if override_headers:
+ req_headers.update(override_headers)
username, password, basic_auth = self._auth_params(req_headers)
try:
# Service root is an unauthenticated resource; remove credentials
@@ -136,8 +147,19 @@ class RedfishUtils(object):
force_basic_auth=basic_auth, validate_certs=False,
follow_redirects='all',
use_proxy=True, timeout=self.timeout)
- data = json.loads(to_native(resp.read()))
headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ try:
+ if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'):
+ # Older versions of Ansible do not automatically decompress the data
+ # Starting in 2.14, open_url will decompress the response data by default
+ data = json.loads(to_native(gzip.open(BytesIO(resp.read()), 'rt', encoding='utf-8').read()))
+ else:
+ data = json.loads(to_native(resp.read()))
+ except Exception as e:
+ # No response data; this is okay in certain cases
+ data = None
+ if not allow_no_resp:
+ raise
except HTTPError as e:
msg = self._get_extended_message(e)
return {'ret': False,
@@ -153,7 +175,7 @@ class RedfishUtils(object):
'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
return {'ret': True, 'data': data, 'headers': headers, 'resp': resp}
- def post_request(self, uri, pyld):
+ def post_request(self, uri, pyld, multipart=False):
req_headers = dict(POST_HEADERS)
username, password, basic_auth = self._auth_params(req_headers)
try:
@@ -162,7 +184,14 @@ class RedfishUtils(object):
# header since this can cause conflicts with some services
if self.sessions_uri is not None and uri == (self.root_uri + self.sessions_uri):
basic_auth = False
- resp = open_url(uri, data=json.dumps(pyld),
+ if multipart:
+ # Multipart requests require special handling to encode the request body
+ multipart_encoder = self._prepare_multipart(pyld)
+ data = multipart_encoder[0]
+ req_headers['content-type'] = multipart_encoder[1]
+ else:
+ data = json.dumps(pyld)
+ resp = open_url(uri, data=data,
headers=req_headers, method="POST",
url_username=username, url_password=password,
force_basic_auth=basic_auth, validate_certs=False,
@@ -299,6 +328,59 @@ class RedfishUtils(object):
return {'ret': True, 'resp': resp}
@staticmethod
+ def _prepare_multipart(fields):
+ """Prepares a multipart body based on a set of fields provided.
+
+ Ideally it would have been good to use the existing 'prepare_multipart'
+ found in ansible.module_utils.urls, but it takes files and encodes them
+ as Base64 strings, which is not expected by Redfish services. It also
+ adds escaping of certain bytes in the payload, such as inserting '\r'
+ any time it finds a standalone '\n', which corrupts the image payload
+ sent to the service. This implementation is simplified to Redfish's
+ usage and doesn't necessarily represent an exhaustive method of
+ building multipart requests.
+ """
+
+ def write_buffer(body, line):
+ # Adds to the multipart body based on the provided data type
+ # At this time there is only support for strings, dictionaries, and bytes (default)
+ if isinstance(line, text_type):
+ body.append(to_bytes(line, encoding='utf-8'))
+ elif isinstance(line, dict):
+ body.append(to_bytes(json.dumps(line), encoding='utf-8'))
+ else:
+ body.append(line)
+ return
+
+ # Generate a random boundary marker; may need to consider probing the
+ # payload for potential conflicts in the future
+ boundary = ''.join(random.choice(string.digits + string.ascii_letters) for i in range(30))
+ body = []
+ for form in fields:
+ # Fill in the form details
+ write_buffer(body, '--' + boundary)
+
+ # Insert the headers (Content-Disposition and Content-Type)
+ if 'filename' in fields[form]:
+ name = os.path.basename(fields[form]['filename']).replace('"', '\\"')
+ write_buffer(body, u'Content-Disposition: form-data; name="%s"; filename="%s"' % (to_text(form), to_text(name)))
+ else:
+ write_buffer(body, 'Content-Disposition: form-data; name="%s"' % form)
+ write_buffer(body, 'Content-Type: %s' % fields[form]['mime_type'])
+ write_buffer(body, '')
+
+ # Insert the payload; read from the file if not given by the caller
+ if 'content' not in fields[form]:
+ with open(to_bytes(fields[form]['filename'], errors='surrogate_or_strict'), 'rb') as f:
+ fields[form]['content'] = f.read()
+ write_buffer(body, fields[form]['content'])
+
+ # Finalize the entire request
+ write_buffer(body, '--' + boundary + '--')
+ write_buffer(body, '')
+ return (b'\r\n'.join(body), 'multipart/form-data; boundary=' + boundary)
+
+ @staticmethod
def _get_extended_message(error):
"""
Get Redfish ExtendedInfo message from response payload if present
@@ -652,7 +734,8 @@ class RedfishUtils(object):
properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
'Location', 'Manufacturer', 'Model', 'Name', 'Id',
'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
- key = "StorageControllers"
+ key = "Controllers"
+ deprecated_key = "StorageControllers"
# Find Storage service
response = self.get_request(self.root_uri + systems_uri)
@@ -680,7 +763,30 @@ class RedfishUtils(object):
data = response['data']
if key in data:
- controller_list = data[key]
+ controllers_uri = data[key][u'@odata.id']
+
+ response = self.get_request(self.root_uri + controllers_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if data[u'Members']:
+ for controller_member in data[u'Members']:
+ controller_member_uri = controller_member[u'@odata.id']
+ response = self.get_request(self.root_uri + controller_member_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ controller_result = {}
+ for property in properties:
+ if property in data:
+ controller_result[property] = data[property]
+ controller_results.append(controller_result)
+ elif deprecated_key in data:
+ controller_list = data[deprecated_key]
for controller in controller_list:
controller_result = {}
for property in properties:
@@ -702,7 +808,7 @@ class RedfishUtils(object):
properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes',
'EncryptionAbility', 'EncryptionStatus',
'FailurePredicted', 'HotspareType', 'Id', 'Identifiers',
- 'Manufacturer', 'MediaType', 'Model', 'Name',
+ 'Links', 'Manufacturer', 'MediaType', 'Model', 'Name',
'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision',
'RotationSpeedRPM', 'SerialNumber', 'Status']
@@ -735,7 +841,25 @@ class RedfishUtils(object):
return response
data = response['data']
controller_name = 'Controller 1'
- if 'StorageControllers' in data:
+ if 'Controllers' in data:
+ controllers_uri = data['Controllers'][u'@odata.id']
+
+ response = self.get_request(self.root_uri + controllers_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ cdata = response['data']
+
+ if cdata[u'Members']:
+ controller_member_uri = cdata[u'Members'][0][u'@odata.id']
+
+ response = self.get_request(self.root_uri + controller_member_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ cdata = response['data']
+ controller_name = cdata['Name']
+ elif 'StorageControllers' in data:
sc = data['StorageControllers']
if sc:
if 'Name' in sc[0]:
@@ -754,7 +878,12 @@ class RedfishUtils(object):
for property in properties:
if property in data:
if data[property] is not None:
- drive_result[property] = data[property]
+ if property == "Links":
+ if "Volumes" in data["Links"].keys():
+ volumes = [v["@odata.id"] for v in data["Links"]["Volumes"]]
+ drive_result["Volumes"] = volumes
+ else:
+ drive_result[property] = data[property]
drive_results.append(drive_result)
drives = {'Controller': controller_name,
'Drives': drive_results}
@@ -832,14 +961,32 @@ class RedfishUtils(object):
if data.get('Members'):
for controller in data[u'Members']:
controller_list.append(controller[u'@odata.id'])
- for c in controller_list:
+ for idx, c in enumerate(controller_list):
uri = self.root_uri + c
response = self.get_request(uri)
if response['ret'] is False:
return response
data = response['data']
- controller_name = 'Controller 1'
- if 'StorageControllers' in data:
+ controller_name = 'Controller %s' % str(idx)
+ if 'Controllers' in data:
+ response = self.get_request(self.root_uri + data['Controllers'][u'@odata.id'])
+ if response['ret'] is False:
+ return response
+ c_data = response['data']
+
+ if c_data.get('Members') and c_data['Members']:
+ response = self.get_request(self.root_uri + c_data['Members'][0][u'@odata.id'])
+ if response['ret'] is False:
+ return response
+ member_data = response['data']
+
+ if member_data:
+ if 'Name' in member_data:
+ controller_name = member_data['Name']
+ else:
+ controller_id = member_data.get('Id', '1')
+ controller_name = 'Controller %s' % controller_id
+ elif 'StorageControllers' in data:
sc = data['StorageControllers']
if sc:
if 'Name' in sc[0]:
@@ -848,6 +995,7 @@ class RedfishUtils(object):
sc_id = sc[0].get('Id', '1')
controller_name = 'Controller %s' % sc_id
volume_results = []
+ volume_list = []
if 'Volumes' in data:
# Get a list of all volumes and build respective URIs
volumes_uri = data[u'Volumes'][u'@odata.id']
@@ -948,7 +1096,12 @@ class RedfishUtils(object):
# command should be PowerOn, PowerForceOff, etc.
if not command.startswith('Power'):
return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
- reset_type = command[5:]
+
+ # Commands (except PowerCycle) will be stripped of the 'Power' prefix
+ if command == 'PowerCycle':
+ reset_type = command
+ else:
+ reset_type = command[5:]
# map Reboot to a ResetType that does a reboot
if reset_type == 'Reboot':
@@ -1056,7 +1209,8 @@ class RedfishUtils(object):
user_list = []
users_results = []
# Get these entries, but does not fail if not found
- properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled']
+ properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled',
+ 'AccountTypes', 'OEMAccountTypes']
response = self.get_request(self.root_uri + self.accounts_uri)
if response['ret'] is False:
@@ -1079,6 +1233,12 @@ class RedfishUtils(object):
if property in data:
user[property] = data[property]
+ # Filter out empty account slots
+ # An empty account slot can be detected if the username is an empty
+ # string and if the account is disabled
+ if user.get('UserName', '') == '' and not user.get('Enabled', False):
+ continue
+
users_results.append(user)
result["entries"] = users_results
return result
@@ -1101,6 +1261,10 @@ class RedfishUtils(object):
payload['Password'] = user.get('account_password')
if user.get('account_roleid'):
payload['RoleId'] = user.get('account_roleid')
+ if user.get('account_accounttypes'):
+ payload['AccountTypes'] = user.get('account_accounttypes')
+ if user.get('account_oemaccounttypes'):
+ payload['OEMAccountTypes'] = user.get('account_oemaccounttypes')
return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
def add_user(self, user):
@@ -1131,6 +1295,10 @@ class RedfishUtils(object):
payload['Password'] = user.get('account_password')
if user.get('account_roleid'):
payload['RoleId'] = user.get('account_roleid')
+ if user.get('account_accounttypes'):
+ payload['AccountTypes'] = user.get('account_accounttypes')
+ if user.get('account_oemaccounttypes'):
+ payload['OEMAccountTypes'] = user.get('account_oemaccounttypes')
if user.get('account_id'):
payload['Id'] = user.get('account_id')
@@ -1400,29 +1568,37 @@ class RedfishUtils(object):
def _software_inventory(self, uri):
result = {}
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
result['entries'] = []
- for member in data[u'Members']:
- uri = self.root_uri + member[u'@odata.id']
- # Get details for each software or firmware member
- response = self.get_request(uri)
+
+ while uri:
+ response = self.get_request(self.root_uri + uri)
if response['ret'] is False:
return response
result['ret'] = True
+
data = response['data']
- software = {}
- # Get these standard properties if present
- for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
- 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
- 'ReleaseDate']:
- if key in data:
- software[key] = data.get(key)
- result['entries'].append(software)
+ if data.get('Members@odata.nextLink'):
+ uri = data.get('Members@odata.nextLink')
+ else:
+ uri = None
+
+ for member in data[u'Members']:
+ fw_uri = self.root_uri + member[u'@odata.id']
+ # Get details for each software or firmware member
+ response = self.get_request(fw_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ software = {}
+ # Get these standard properties if present
+ for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
+ 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
+ 'ReleaseDate']:
+ if key in data:
+ software[key] = data.get(key)
+ result['entries'].append(software)
+
return result
def get_firmware_inventory(self):
@@ -1490,7 +1666,10 @@ class RedfishUtils(object):
# Scan the messages to see if next steps are needed
for message in operation_results['messages']:
- message_id = message['MessageId']
+ message_id = message.get('MessageId')
+ if message_id is None:
+ # While this is invalid, treat the lack of a MessageId as "no message"
+ continue
if message_id.startswith('Update.1.') and message_id.endswith('.OperationTransitionedToJob'):
# Operation rerouted to a job; update the status and handle
@@ -1572,6 +1751,64 @@ class RedfishUtils(object):
'msg': "SimpleUpdate requested",
'update_status': self._operation_results(response['resp'], response['data'])}
+ def multipath_http_push_update(self, update_opts):
+ """
+ Provides a software update via the URI specified by the
+ MultipartHttpPushUri property. Callers should adjust the 'timeout'
+ variable in the base object to accommodate the size of the image and
+ speed of the transfer. For example, a 200MB image will likely take
+ more than the default 10 second timeout.
+
+ :param update_opts: The parameters for the update operation
+ :return: dict containing the response of the update request
+ """
+ image_file = update_opts.get('update_image_file')
+ targets = update_opts.get('update_targets')
+ apply_time = update_opts.get('update_apply_time')
+ oem_params = update_opts.get('update_oem_params')
+
+ # Ensure the image file is provided
+ if not image_file:
+ return {'ret': False, 'msg':
+ 'Must specify update_image_file for the MultipartHTTPPushUpdate command'}
+ if not os.path.isfile(image_file):
+ return {'ret': False, 'msg':
+ 'Must specify a valid file for the MultipartHTTPPushUpdate command'}
+ try:
+ with open(image_file, 'rb') as f:
+ image_payload = f.read()
+ except Exception as e:
+ return {'ret': False, 'msg':
+ 'Could not read file %s' % image_file}
+
+ # Check that multipart HTTP push updates are supported
+ response = self.get_request(self.root_uri + self.update_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'MultipartHttpPushUri' not in data:
+ return {'ret': False, 'msg': 'Service does not support MultipartHttpPushUri'}
+ update_uri = data['MultipartHttpPushUri']
+
+ # Assemble the JSON payload portion of the request
+ payload = {"@Redfish.OperationApplyTime": "Immediate"}
+ if targets:
+ payload["Targets"] = targets
+ if apply_time:
+ payload["@Redfish.OperationApplyTime"] = apply_time
+ if oem_params:
+ payload["Oem"] = oem_params
+ multipart_payload = {
+ 'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'},
+ 'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'}
+ }
+ response = self.post_request(self.root_uri + update_uri, multipart_payload, multipart=True)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "MultipartHTTPPushUpdate requested",
+ 'update_status': self._operation_results(response['resp'], response['data'])}
+
def get_update_status(self, update_handle):
"""
Gets the status of an update operation.
@@ -1584,7 +1821,7 @@ class RedfishUtils(object):
return {'ret': False, 'msg': 'Must provide a handle tracking the update.'}
# Get the task or job tracking the update
- response = self.get_request(self.root_uri + update_handle)
+ response = self.get_request(self.root_uri + update_handle, allow_no_resp=True)
if response['ret'] is False:
return response
@@ -2142,7 +2379,7 @@ class RedfishUtils(object):
key = "Processors"
# Get these entries, but does not fail if not found
properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz',
- 'TotalCores', 'TotalThreads', 'Status']
+ 'ProcessorArchitecture', 'TotalCores', 'TotalThreads', 'Status']
# Search for 'key' entry and extract URI from it
response = self.get_request(self.root_uri + systems_uri)
@@ -2246,7 +2483,7 @@ class RedfishUtils(object):
result = {}
properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
'NameServers', 'MACAddress', 'PermanentMACAddress',
- 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
+ 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status', 'LinkStatus']
response = self.get_request(self.root_uri + resource_uri)
if response['ret'] is False:
return response
@@ -2678,8 +2915,7 @@ class RedfishUtils(object):
# Get a list of all Chassis and build URIs, then get all PowerSupplies
# from each Power entry in the Chassis
- chassis_uri_list = self.chassis_uris
- for chassis_uri in chassis_uri_list:
+ for chassis_uri in self.chassis_uris:
response = self.get_request(self.root_uri + chassis_uri)
if response['ret'] is False:
return response
@@ -2726,7 +2962,7 @@ class RedfishUtils(object):
result = {}
inventory = {}
# Get these entries, but does not fail if not found
- properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer',
+ properties = ['Status', 'HostName', 'PowerState', 'BootProgress', 'Model', 'Manufacturer',
'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag',
'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary',
'ProcessorSummary', 'TrustedModules', 'Name', 'Id']
@@ -3135,8 +3371,9 @@ class RedfishUtils(object):
result = {}
inventory = {}
# Get these entries, but does not fail if not found
- properties = ['FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model',
- 'PartNumber', 'PowerState', 'SerialNumber', 'Status', 'UUID']
+ properties = ['Id', 'FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model',
+ 'PartNumber', 'PowerState', 'SerialNumber', 'ServiceIdentification',
+ 'Status', 'UUID']
response = self.get_request(self.root_uri + manager_uri)
if response['ret'] is False:
@@ -3154,6 +3391,35 @@ class RedfishUtils(object):
def get_multi_manager_inventory(self):
return self.aggregate_managers(self.get_manager_inventory)
+ def get_service_identification(self, manager):
+ result = {}
+ if manager is None:
+ if len(self.manager_uris) == 1:
+ manager = self.manager_uris[0].split('/')[-1]
+ elif len(self.manager_uris) > 1:
+ entries = self.get_multi_manager_inventory()['entries']
+ managers = [m[0]['manager_uri'] for m in entries if m[1].get('ServiceIdentification')]
+ if len(managers) == 1:
+ manager = managers[0].split('/')[-1]
+ else:
+ self.module.fail_json(msg=[
+ "Multiple managers with ServiceIdentification were found: %s" % str(managers),
+ "Please specify by using the 'manager' parameter in your playbook"])
+ elif len(self.manager_uris) == 0:
+ self.module.fail_json(msg="No manager identities were found")
+ response = self.get_request(self.root_uri + '/redfish/v1/Managers/' + manager, override_headers=None)
+ try:
+ result['service_identification'] = response['data']['ServiceIdentification']
+ except Exception as e:
+ self.module.fail_json(msg="Service ID not found for manager %s" % manager)
+ result['ret'] = True
+ return result
+
+ def set_service_identification(self, service_id):
+ data = {"ServiceIdentification": service_id}
+ resp = self.patch_request(self.root_uri + '/redfish/v1/Managers/' + self.resource_id, data, check_pyld=True)
+ return resp
+
def set_session_service(self, sessions_config):
if sessions_config is None:
return {'ret': False, 'msg':
@@ -3218,34 +3484,285 @@ class RedfishUtils(object):
return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True)
+ def set_secure_boot(self, secure_boot_enable):
+        # This function enables or disables Secure Boot on an OOB controller
+
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response["ret"] is False:
+ return response
+
+ server_details = response["data"]
+ secure_boot_url = server_details["SecureBoot"]["@odata.id"]
+
+ response = self.get_request(self.root_uri + secure_boot_url)
+ if response["ret"] is False:
+ return response
+
+ body = {}
+ body["SecureBootEnable"] = secure_boot_enable
+
+ return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True)
+
def get_hpe_thermal_config(self):
result = {}
key = "Thermal"
# Go through list
- for chassis_uri in self.chassis_uri_list:
+ for chassis_uri in self.chassis_uris:
response = self.get_request(self.root_uri + chassis_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
- oem = data.get['Oem']
- hpe = oem.get['Hpe']
- thermal_config = hpe.get('ThermalConfiguration')
- result["current_thermal_config"] = thermal_config
- return result
+ val = data.get('Oem', {}).get('Hpe', {}).get('ThermalConfiguration')
+ if val is not None:
+ return {"ret": True, "current_thermal_config": val}
+ return {"ret": False}
def get_hpe_fan_percent_min(self):
result = {}
key = "Thermal"
# Go through list
- for chassis_uri in self.chassis_uri_list:
+ for chassis_uri in self.chassis_uris:
response = self.get_request(self.root_uri + chassis_uri)
if response['ret'] is False:
return response
- result['ret'] = True
data = response['data']
- oem = data.get['Oem']
- hpe = oem.get['Hpe']
- fan_percent_min_config = hpe.get('FanPercentMinimum')
- result["fan_percent_min"] = fan_percent_min_config
- return result
+ val = data.get('Oem', {}).get('Hpe', {}).get('FanPercentMinimum')
+ if val is not None:
+ return {"ret": True, "fan_percent_min": val}
+ return {"ret": False}
+
+ def delete_volumes(self, storage_subsystem_id, volume_ids):
+ # Find the Storage resource from the requested ComputerSystem resource
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ storage_uri = data.get('Storage', {}).get('@odata.id')
+ if storage_uri is None:
+ return {'ret': False, 'msg': 'Storage resource not found'}
+
+ # Get Storage Collection
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Collect Storage Subsystems
+ self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.storage_subsystems_uris:
+ return {
+ 'ret': False,
+ 'msg': "StorageCollection's Members array is either empty or missing"}
+
+ # Matching Storage Subsystem ID with user input
+ self.storage_subsystem_uri = ""
+ for storage_subsystem_uri in self.storage_subsystems_uris:
+ if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id:
+ self.storage_subsystem_uri = storage_subsystem_uri
+
+ if not self.storage_subsystem_uri:
+ return {
+ 'ret': False,
+ 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id}
+
+ # Get Volume Collection
+ response = self.get_request(self.root_uri + self.storage_subsystem_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ response = self.get_request(self.root_uri + data['Volumes']['@odata.id'])
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Collect Volumes
+ self.volume_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.volume_uris:
+ return {
+ 'ret': True, 'changed': False,
+ 'msg': "VolumeCollection's Members array is either empty or missing"}
+
+ # Delete each volume
+ for volume in self.volume_uris:
+ if volume.split("/")[-1] in volume_ids:
+ response = self.delete_request(self.root_uri + volume)
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'changed': True,
+ 'msg': "The following volumes were deleted: %s" % str(volume_ids)}
+
+ def create_volume(self, volume_details, storage_subsystem_id):
+ # Find the Storage resource from the requested ComputerSystem resource
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ storage_uri = data.get('Storage', {}).get('@odata.id')
+ if storage_uri is None:
+ return {'ret': False, 'msg': 'Storage resource not found'}
+
+ # Get Storage Collection
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Collect Storage Subsystems
+ self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.storage_subsystems_uris:
+ return {
+ 'ret': False,
+ 'msg': "StorageCollection's Members array is either empty or missing"}
+
+ # Matching Storage Subsystem ID with user input
+ self.storage_subsystem_uri = ""
+ for storage_subsystem_uri in self.storage_subsystems_uris:
+ if storage_subsystem_uri.split("/")[-2] == storage_subsystem_id:
+ self.storage_subsystem_uri = storage_subsystem_uri
+
+ if not self.storage_subsystem_uri:
+ return {
+ 'ret': False,
+ 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id}
+
+ # Validate input parameters
+ required_parameters = ['RAIDType', 'Drives', 'CapacityBytes']
+ allowed_parameters = ['DisplayName', 'InitializeMethod', 'MediaSpanCount',
+ 'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy']
+
+ for parameter in required_parameters:
+ if not volume_details.get(parameter):
+ return {
+ 'ret': False,
+                'msg': "%s are required parameters to create a volume" % str(required_parameters)}
+
+ # Navigate to the volume uri of the correct storage subsystem
+ response = self.get_request(self.root_uri + self.storage_subsystem_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Deleting any volumes of RAIDType None present on the Storage Subsystem
+ response = self.get_request(self.root_uri + data['Volumes']['@odata.id'])
+ if response['ret'] is False:
+ return response
+ volume_data = response['data']
+
+ if "Members" in volume_data:
+ for member in volume_data["Members"]:
+ response = self.get_request(self.root_uri + member['@odata.id'])
+ if response['ret'] is False:
+ return response
+ member_data = response['data']
+
+ if member_data["RAIDType"] == "None":
+ response = self.delete_request(self.root_uri + member['@odata.id'])
+ if response['ret'] is False:
+ return response
+
+ # Construct payload and issue POST command to create volume
+ volume_details["Links"] = {}
+ volume_details["Links"]["Drives"] = []
+ for drive in volume_details["Drives"]:
+ volume_details["Links"]["Drives"].append({"@odata.id": drive})
+ del volume_details["Drives"]
+ payload = volume_details
+ response = self.post_request(self.root_uri + data['Volumes']['@odata.id'], payload)
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'changed': True,
+ 'msg': "Volume Created"}
+
+ def get_bios_registries(self):
+        # Get the ComputerSystem resource
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if not response["ret"]:
+ return response
+
+ server_details = response["data"]
+
+ # Get Registries URI
+ if "Bios" not in server_details:
+            msg = "Getting BIOS URI failed, Key 'Bios' not found in Systems resource response: %s"
+ return {
+ "ret": False,
+ "msg": msg % str(server_details)
+ }
+
+ bios_uri = server_details["Bios"]["@odata.id"]
+ bios_resp = self.get_request(self.root_uri + bios_uri)
+ if not bios_resp["ret"]:
+ return bios_resp
+
+ bios_data = bios_resp["data"]
+ attribute_registry = bios_data["AttributeRegistry"]
+
+ reg_uri = self.root_uri + self.service_root + "Registries/" + attribute_registry
+ reg_resp = self.get_request(reg_uri)
+ if not reg_resp["ret"]:
+ return reg_resp
+
+ reg_data = reg_resp["data"]
+
+ # Get BIOS attribute registry URI
+ lst = []
+
+ # Get the location URI
+ response = self.check_location_uri(reg_data, reg_uri)
+ if not response["ret"]:
+ return response
+
+ rsp_data, rsp_uri = response["rsp_data"], response["rsp_uri"]
+
+ if "RegistryEntries" not in rsp_data:
+ return {
+ "msg": "'RegistryEntries' not present in %s response, %s" % (rsp_uri, str(rsp_data)),
+ "ret": False
+ }
+
+ return {
+ "bios_registry": rsp_data,
+ "bios_registry_uri": rsp_uri,
+ "ret": True
+ }
+
+ def check_location_uri(self, resp_data, resp_uri):
+ # Get the location URI response
+ # return {"msg": self.creds, "ret": False}
+ vendor = self._get_vendor()['Vendor']
+ rsp_uri = ""
+ for loc in resp_data['Location']:
+ if loc['Language'] == "en":
+ rsp_uri = loc['Uri']
+ if vendor == 'HPE':
+ # WORKAROUND
+ # HPE systems with iLO 4 will have BIOS Attribute Registries location URI as a dictionary with key 'extref'
+ # Hence adding condition to fetch the Uri
+ if isinstance(loc['Uri'], dict) and "extref" in loc['Uri'].keys():
+ rsp_uri = loc['Uri']['extref']
+ if not rsp_uri:
+ msg = "Language 'en' not found in BIOS Attribute Registries location, URI: %s, response: %s"
+ return {
+ "ret": False,
+ "msg": msg % (resp_uri, str(resp_data))
+ }
+
+ res = self.get_request(self.root_uri + rsp_uri)
+ if res['ret'] is False:
+ # WORKAROUND
+ # HPE systems with iLO 4 or iLO5 compresses (gzip) for some URIs
+ # Hence adding encoding to the header
+ if vendor == 'HPE':
+ override_headers = {"Accept-Encoding": "gzip"}
+ res = self.get_request(self.root_uri + rsp_uri, override_headers=override_headers)
+ if res['ret']:
+ return {
+ "ret": True,
+ "rsp_data": res["data"],
+ "rsp_uri": rsp_uri
+ }
+ return res
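
The paginated walk of the software inventory added above follows the generic Redfish
collection pattern: request a page, process its Members, then continue with
Members@odata.nextLink until it is absent. A minimal standalone sketch of that loop,
assuming a plain requests.Session rather than RedfishUtils.get_request() (the names
here are illustrative, not part of the module_utils):

    import requests

    def iter_collection_members(root_uri, collection_uri, session):
        """Yield the @odata.id of every member, following nextLink pagination."""
        uri = collection_uri
        while uri:
            data = session.get(root_uri + uri).json()
            for member in data.get('Members', []):
                yield member['@odata.id']
            # Absent on the last page, which ends the loop
            uri = data.get('Members@odata.nextLink')

    # session = requests.Session(); session.auth = ('user', 'pass')
    # for member_uri in iter_collection_members('https://bmc.example.com',
    #                                           '/redfish/v1/UpdateService/SoftwareInventory', session):
    #     print(member_uri)
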
diff --git a/ansible_collections/community/general/plugins/module_utils/redhat.py b/ansible_collections/community/general/plugins/module_utils/redhat.py
index f82cffaa0..110159ddf 100644
--- a/ansible_collections/community/general/plugins/module_utils/redhat.py
+++ b/ansible_collections/community/general/plugins/module_utils/redhat.py
@@ -24,6 +24,14 @@ from ansible.module_utils.six.moves import configparser
class RegistrationBase(object):
+ """
+ DEPRECATION WARNING
+
+ This class is deprecated and will be removed in community.general 10.0.0.
+ There is no replacement for it; please contact the community.general
+ maintainers in case you are using it.
+ """
+
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
@@ -71,10 +79,23 @@ class RegistrationBase(object):
class Rhsm(RegistrationBase):
+ """
+ DEPRECATION WARNING
+
+ This class is deprecated and will be removed in community.general 9.0.0.
+ There is no replacement for it; please contact the community.general
+ maintainers in case you are using it.
+ """
+
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
+ self.module.deprecate(
+ 'The Rhsm class is deprecated with no replacement.',
+ version='9.0.0',
+ collection_name='community.general',
+ )
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
@@ -200,14 +221,25 @@ class Rhsm(RegistrationBase):
class RhsmPool(object):
- '''
- Convenience class for housing subscription information
- '''
+ """
+ Convenience class for housing subscription information
+
+ DEPRECATION WARNING
+
+ This class is deprecated and will be removed in community.general 9.0.0.
+ There is no replacement for it; please contact the community.general
+ maintainers in case you are using it.
+ """
def __init__(self, module, **kwargs):
self.module = module
for k, v in kwargs.items():
setattr(self, k, v)
+ self.module.deprecate(
+ 'The RhsmPool class is deprecated with no replacement.',
+ version='9.0.0',
+ collection_name='community.general',
+ )
def __str__(self):
return str(self.__getattribute__('_name'))
@@ -223,11 +255,23 @@ class RhsmPool(object):
class RhsmPools(object):
"""
- This class is used for manipulating pools subscriptions with RHSM
+ This class is used for manipulating pools subscriptions with RHSM
+
+ DEPRECATION WARNING
+
+ This class is deprecated and will be removed in community.general 9.0.0.
+ There is no replacement for it; please contact the community.general
+ maintainers in case you are using it.
"""
+
def __init__(self, module):
self.module = module
self.products = self._load_product_list()
+ self.module.deprecate(
+ 'The RhsmPools class is deprecated with no replacement.',
+ version='9.0.0',
+ collection_name='community.general',
+ )
def __iter__(self):
return self.products.__iter__()
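
The deprecation handling added to these classes is the standard AnsibleModule idiom:
call module.deprecate() once, when the object is constructed. A short sketch of the
same pattern for a hypothetical helper class (LegacyHelper is not part of this file):

    class LegacyHelper(object):
        """Hypothetical helper kept only for backwards compatibility."""

        def __init__(self, module):
            self.module = module
            # Warn once per task run that this helper is going away
            module.deprecate(
                'The LegacyHelper class is deprecated with no replacement.',
                version='9.0.0',
                collection_name='community.general',
            )
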
diff --git a/ansible_collections/community/general/plugins/module_utils/rundeck.py b/ansible_collections/community/general/plugins/module_utils/rundeck.py
index 6fb56fbae..7df68a360 100644
--- a/ansible_collections/community/general/plugins/module_utils/rundeck.py
+++ b/ansible_collections/community/general/plugins/module_utils/rundeck.py
@@ -72,7 +72,9 @@ def api_request(module, endpoint, data=None, method="GET"):
if info["status"] == 403:
module.fail_json(msg="Token authorization failed",
execution_info=json.loads(info["body"]))
- if info["status"] == 409:
+ elif info["status"] == 404:
+ return None, info
+ elif info["status"] == 409:
module.fail_json(msg="Job executions limit reached",
execution_info=json.loads(info["body"]))
elif info["status"] >= 500:
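
With the new 404 branch, api_request() returns (None, info) for a missing resource
instead of failing the module, so callers can distinguish "not found" from an error. A
hedged sketch of a caller, assuming api_request is imported from this module_utils and
keeps returning a (response, info) pair:

    from ansible_collections.community.general.plugins.module_utils.rundeck import api_request

    def job_exists(module, job_id):
        # api_request() returns (None, info) when Rundeck answers with HTTP 404
        response, dummy = api_request(module, endpoint="job/%s" % job_id)
        return response is not None
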
diff --git a/ansible_collections/community/general/plugins/module_utils/scaleway.py b/ansible_collections/community/general/plugins/module_utils/scaleway.py
index 43f209480..67b821103 100644
--- a/ansible_collections/community/general/plugins/module_utils/scaleway.py
+++ b/ansible_collections/community/general/plugins/module_utils/scaleway.py
@@ -303,7 +303,7 @@ class Scaleway(object):
wait_timeout = self.module.params["wait_timeout"]
wait_sleep_time = self.module.params["wait_sleep_time"]
- # Prevent requesting the ressource status too soon
+ # Prevent requesting the resource status too soon
time.sleep(wait_sleep_time)
start = datetime.datetime.utcnow()
diff --git a/ansible_collections/community/general/plugins/module_utils/snap.py b/ansible_collections/community/general/plugins/module_utils/snap.py
new file mode 100644
index 000000000..253269b9a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/snap.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+_alias_state_map = dict(
+ present='alias',
+ absent='unalias',
+ info='aliases',
+)
+
+_state_map = dict(
+ present='install',
+ absent='remove',
+ enabled='enable',
+ disabled='disable',
+ refresh='refresh',
+)
+
+
+def snap_runner(module, **kwargs):
+ runner = CmdRunner(
+ module,
+ "snap",
+ arg_formats=dict(
+ state_alias=cmd_runner_fmt.as_map(_alias_state_map), # snap_alias only
+ name=cmd_runner_fmt.as_list(),
+ alias=cmd_runner_fmt.as_list(), # snap_alias only
+ state=cmd_runner_fmt.as_map(_state_map),
+ _list=cmd_runner_fmt.as_fixed("list"),
+ _set=cmd_runner_fmt.as_fixed("set"),
+ get=cmd_runner_fmt.as_fixed(["get", "-d"]),
+ classic=cmd_runner_fmt.as_bool("--classic"),
+ channel=cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]),
+ options=cmd_runner_fmt.as_list(),
+ info=cmd_runner_fmt.as_fixed("info"),
+ dangerous=cmd_runner_fmt.as_bool("--dangerous"),
+ ),
+ check_rc=False,
+ **kwargs
+ )
+ return runner
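
A module would typically build this runner once and then format only the arguments it
needs for a given snap invocation. A minimal usage sketch, assuming CmdRunner's default
behaviour of returning (rc, out, err) from ctx.run() when no output_process is given:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.snap import snap_runner

    module = AnsibleModule(argument_spec=dict(name=dict(type='list', elements='str', required=True)))
    runner = snap_runner(module)
    # The format string selects and orders the arguments; values are passed to ctx.run()
    with runner("state name") as ctx:
        rc, out, err = ctx.run(state="present", name=module.params["name"])
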
diff --git a/ansible_collections/community/general/plugins/module_utils/vardict.py b/ansible_collections/community/general/plugins/module_utils/vardict.py
new file mode 100644
index 000000000..cfcce4d4d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/vardict.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+# (c) 2023, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2023, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import copy
+
+
+class _Variable(object):
+ NOTHING = object()
+
+ def __init__(self, diff=False, output=True, change=None, fact=False, verbosity=0):
+ self.init = False
+ self.initial_value = None
+ self.value = None
+
+ self.diff = None
+ self._change = None
+ self.output = None
+ self.fact = None
+ self._verbosity = None
+ self.set_meta(output=output, diff=diff, change=change, fact=fact, verbosity=verbosity)
+
+ def getchange(self):
+ return self.diff if self._change is None else self._change
+
+ def setchange(self, value):
+ self._change = value
+
+ def getverbosity(self):
+ return self._verbosity
+
+ def setverbosity(self, v):
+ if not (0 <= v <= 4):
+ raise ValueError("verbosity must be an int in the range 0 to 4")
+ self._verbosity = v
+
+ change = property(getchange, setchange)
+ verbosity = property(getverbosity, setverbosity)
+
+ def set_meta(self, output=None, diff=None, change=None, fact=None, initial_value=NOTHING, verbosity=None):
+ """Set the metadata for the variable
+
+ Args:
+ output (bool, optional): flag indicating whether the variable should be in the output of the module. Defaults to None.
+ diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None.
+ change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None.
+ fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None.
+ initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING.
+ verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None.
+ """
+ if output is not None:
+ self.output = output
+ if change is not None:
+ self.change = change
+ if diff is not None:
+ self.diff = diff
+ if fact is not None:
+ self.fact = fact
+ if initial_value is not _Variable.NOTHING:
+ self.initial_value = copy.deepcopy(initial_value)
+ if verbosity is not None:
+ self.verbosity = verbosity
+
+ def as_dict(self, meta_only=False):
+ d = {
+ "diff": self.diff,
+ "change": self.change,
+ "output": self.output,
+ "fact": self.fact,
+ "verbosity": self.verbosity,
+ }
+ if not meta_only:
+ d["initial_value"] = copy.deepcopy(self.initial_value)
+ d["value"] = self.value
+ return d
+
+ def set_value(self, value):
+ if not self.init:
+ self.initial_value = copy.deepcopy(value)
+ self.init = True
+ self.value = value
+ return self
+
+ def is_visible(self, verbosity):
+ return self.verbosity <= verbosity
+
+ @property
+ def has_changed(self):
+ return self.change and (self.initial_value != self.value)
+
+ @property
+ def diff_result(self):
+ if self.diff and self.has_changed:
+ return {'before': self.initial_value, 'after': self.value}
+ return
+
+ def __str__(self):
+ return "<_Variable: value={0!r}, initial={1!r}, diff={2}, output={3}, change={4}, verbosity={5}>".format(
+ self.value, self.initial_value, self.diff, self.output, self.change, self.verbosity
+ )
+
+
+class VarDict(object):
+ reserved_names = ('__vars__', '_var', 'var', 'set_meta', 'get_meta', 'set', 'output', 'diff', 'facts', 'has_changed', 'as_dict')
+
+ def __init__(self):
+ self.__vars__ = dict()
+
+ def __getitem__(self, item):
+ return self.__vars__[item].value
+
+ def __setitem__(self, key, value):
+ self.set(key, value)
+
+ def __getattr__(self, item):
+ try:
+ return self.__vars__[item].value
+ except KeyError:
+ return getattr(super(VarDict, self), item)
+
+ def __setattr__(self, key, value):
+ if key == '__vars__':
+ super(VarDict, self).__setattr__(key, value)
+ else:
+ self.set(key, value)
+
+ def _var(self, name):
+ return self.__vars__[name]
+
+ def var(self, name):
+ return self._var(name).as_dict()
+
+ def set_meta(self, name, **kwargs):
+ """Set the metadata for the variable
+
+ Args:
+ name (str): name of the variable having its metadata changed
+ output (bool, optional): flag indicating whether the variable should be in the output of the module. Defaults to None.
+ diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None.
+ change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None.
+ fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None.
+ initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING.
+ verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None.
+ """
+ self._var(name).set_meta(**kwargs)
+
+ def get_meta(self, name):
+ return self._var(name).as_dict(meta_only=True)
+
+ def set(self, name, value, **kwargs):
+ """Set the value and optionally metadata for a variable. The variable is not required to exist prior to calling `set`.
+
+        For details on the accepted metadata see the documentation for method `set_meta`.
+
+ Args:
+ name (str): name of the variable being changed
+ value (any): the value of the variable, it can be of any type
+
+ Raises:
+ ValueError: Raised if trying to set a variable with a reserved name.
+ """
+ if name in self.reserved_names:
+ raise ValueError("Name {0} is reserved".format(name))
+ if name in self.__vars__:
+ var = self._var(name)
+ var.set_meta(**kwargs)
+ else:
+ var = _Variable(**kwargs)
+ var.set_value(value)
+ self.__vars__[name] = var
+
+ def output(self, verbosity=0):
+ return dict((n, v.value) for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity))
+
+ def diff(self, verbosity=0):
+ diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)]
+ if diff_results:
+ before = dict((n, dr['before']) for n, dr in diff_results)
+ after = dict((n, dr['after']) for n, dr in diff_results)
+ return {'before': before, 'after': after}
+ return None
+
+ def facts(self, verbosity=0):
+ facts_result = dict((n, v.value) for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity))
+ return facts_result if facts_result else None
+
+ @property
+ def has_changed(self):
+ return any(var.has_changed for var in self.__vars__.values())
+
+ def as_dict(self):
+ return dict((name, var.value) for name, var in self.__vars__.items())
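
In practice a module sets values through attribute or item access and only touches the
metadata when it wants diff or fact behaviour. A small sketch based solely on the class
above:

    from ansible_collections.community.general.plugins.module_utils.vardict import VarDict

    vd = VarDict()
    vd.set("package", "nginx")                 # plain output variable
    vd.set("version", "1.24.0", diff=True)     # diff=True also enables change tracking
    vd["version"] = "1.25.3"                   # same as set(); metadata is preserved

    vd.output()       # {'package': 'nginx', 'version': '1.25.3'}
    vd.has_changed    # True, because 'version' differs from its initial value
    vd.diff()         # {'before': {'version': '1.24.0'}, 'after': {'version': '1.25.3'}}
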
diff --git a/ansible_collections/community/general/plugins/module_utils/version.py b/ansible_collections/community/general/plugins/module_utils/version.py
index 369988197..935e8005e 100644
--- a/ansible_collections/community/general/plugins/module_utils/version.py
+++ b/ansible_collections/community/general/plugins/module_utils/version.py
@@ -10,13 +10,4 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-from ansible.module_utils.six import raise_from
-
-try:
- from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import
-except ImportError:
- try:
- from distutils.version import LooseVersion # noqa: F401, pylint: disable=unused-import
- except ImportError as exc:
- msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with distutils.version present'
- raise_from(ImportError(msg), exc)
+from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import
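
After this cleanup the module_utils simply re-exports LooseVersion from ansible-core's
compat layer; comparisons stay numeric per component rather than lexicographic, for
example:

    from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

    assert LooseVersion("2.11.0") > LooseVersion("2.9.27")
    assert LooseVersion("1.10") > LooseVersion("1.9")   # 10 > 9, not a string comparison
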
diff --git a/ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py
index d27e02d7b..bc4b0c2cd 100644
--- a/ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py
+++ b/ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py
@@ -182,7 +182,7 @@ class WdcRedfishUtils(RedfishUtils):
:param str bundle_uri: HTTP URI of the firmware bundle.
:return: Firmware version number contained in the bundle, and whether or not the bundle is multi-tenant.
- Either value will be None if unable to deterine.
+ Either value will be None if unable to determine.
:rtype: str or None, bool or None
"""
bundle_temp_filename = fetch_file(module=self.module,
diff --git a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
index 42ac037e1..bad1b2c9d 100644
--- a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
+++ b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
@@ -72,7 +72,7 @@ options:
type: str
validate_certs:
description:
- - If C(false), SSL certificates for the target url will not be validated. This should only be used
+ - If V(false), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: true
diff --git a/ansible_collections/community/general/plugins/modules/aix_devices.py b/ansible_collections/community/general/plugins/modules/aix_devices.py
index ef4ed4961..a0f3cf48d 100644
--- a/ansible_collections/community/general/plugins/modules/aix_devices.py
+++ b/ansible_collections/community/general/plugins/modules/aix_devices.py
@@ -31,7 +31,7 @@ options:
device:
description:
- The name of the device.
- - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
+ - V(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
type: str
force:
description:
@@ -46,9 +46,9 @@ options:
state:
description:
- Controls the device state.
- - C(available) (alias C(present)) rescan a specific device or all devices (when C(device) is not specified).
- - C(removed) (alias C(absent) removes a device.
- - C(defined) changes device to Defined state.
+ - V(available) (alias V(present)) rescan a specific device or all devices (when O(device) is not specified).
+      - V(removed) (alias V(absent)) removes a device.
+ - V(defined) changes device to Defined state.
type: str
choices: [ available, defined, removed ]
default: available
diff --git a/ansible_collections/community/general/plugins/modules/aix_filesystem.py b/ansible_collections/community/general/plugins/modules/aix_filesystem.py
index b1f363a93..6abf6317f 100644
--- a/ansible_collections/community/general/plugins/modules/aix_filesystem.py
+++ b/ansible_collections/community/general/plugins/modules/aix_filesystem.py
@@ -38,8 +38,8 @@ options:
type: list
elements: str
default:
- - agblksize='4096'
- - isnapshot='no'
+ - agblksize=4096
+ - isnapshot=no
auto_mount:
description:
- File system is automatically mounted at system restart.
@@ -58,7 +58,7 @@ options:
default: jfs2
permissions:
description:
- - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+ - Set file system permissions. V(rw) (read-write) or V(ro) (read-only).
type: str
choices: [ ro, rw ]
default: rw
@@ -77,13 +77,13 @@ options:
type: str
rm_mount_point:
description:
- - Removes the mount point directory when used with state C(absent).
+ - Removes the mount point directory when used with state V(absent).
type: bool
default: false
size:
description:
- Specifies the file system size.
- - For already C(present) it will be resized.
+ - For already V(present) it will be resized.
- 512-byte blocks, Megabytes or Gigabytes. If the value has M specified
it will be in Megabytes. If the value has G specified it will be in
Gigabytes.
@@ -96,10 +96,10 @@ options:
state:
description:
- Controls the file system state.
- - C(present) check if file system exists, creates or resize.
- - C(absent) removes existing file system if already C(unmounted).
- - C(mounted) checks if the file system is mounted or mount the file system.
- - C(unmounted) check if the file system is unmounted or unmount the file system.
+      - V(present) checks if the file system exists, and creates or resizes it.
+      - V(absent) removes existing file system if already V(unmounted).
+      - V(mounted) checks if the file system is mounted or mounts the file system.
+      - V(unmounted) checks if the file system is unmounted or unmounts the file system.
type: str
choices: [ absent, mounted, present, unmounted ]
default: present
@@ -108,7 +108,7 @@ options:
- Specifies an existing volume group (VG).
type: str
notes:
- - For more C(attributes), please check "crfs" AIX manual.
+ - For more O(attributes), please check "crfs" AIX manual.
'''
EXAMPLES = r'''
@@ -365,7 +365,53 @@ def create_fs(
# Creates a LVM file system.
crfs_cmd = module.get_bin_path('crfs', True)
if not module.check_mode:
- cmd = [crfs_cmd, "-v", fs_type, "-m", filesystem, vg, device, mount_group, auto_mount, account_subsystem, "-p", permissions, size, "-a", attributes]
+ cmd = [crfs_cmd]
+
+ cmd.append("-v")
+ cmd.append(fs_type)
+
+ if vg:
+ (flag, value) = vg.split()
+ cmd.append(flag)
+ cmd.append(value)
+
+ if device:
+ (flag, value) = device.split()
+ cmd.append(flag)
+ cmd.append(value)
+
+ cmd.append("-m")
+ cmd.append(filesystem)
+
+ if mount_group:
+ (flag, value) = mount_group.split()
+ cmd.append(flag)
+ cmd.append(value)
+
+ if auto_mount:
+ (flag, value) = auto_mount.split()
+ cmd.append(flag)
+ cmd.append(value)
+
+ if account_subsystem:
+ (flag, value) = account_subsystem.split()
+ cmd.append(flag)
+ cmd.append(value)
+
+ cmd.append("-p")
+ cmd.append(permissions)
+
+ if size:
+ (flag, value) = size.split()
+ cmd.append(flag)
+ cmd.append(value)
+
+ if attributes:
+ splitted_attributes = attributes.split()
+ cmd.append("-a")
+ for value in splitted_attributes:
+ cmd.append(value)
+
rc, crfs_out, err = module.run_command(cmd)
if rc == 10:
@@ -461,7 +507,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
account_subsystem=dict(type='bool', default=False),
- attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]),
+ attributes=dict(type='list', elements='str', default=["agblksize=4096", "isnapshot=no"]),
auto_mount=dict(type='bool', default=True),
device=dict(type='str'),
filesystem=dict(type='str', required=True),
diff --git a/ansible_collections/community/general/plugins/modules/aix_inittab.py b/ansible_collections/community/general/plugins/modules/aix_inittab.py
index c2c968189..d4c9aa0b5 100644
--- a/ansible_collections/community/general/plugins/modules/aix_inittab.py
+++ b/ansible_collections/community/general/plugins/modules/aix_inittab.py
@@ -204,7 +204,7 @@ def main():
":" + module.params['action'] + ":" + module.params['command']
# If current entry exists or fields are different(if the entry does not
- # exists, then the entry wil be created
+    # exist, then the entry will be created)
if (not current_entry['exist']) or (
module.params['runlevel'] != current_entry['runlevel'] or
module.params['action'] != current_entry['action'] or
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvg.py b/ansible_collections/community/general/plugins/modules/aix_lvg.py
index d89c43de4..2892a68ad 100644
--- a/ansible_collections/community/general/plugins/modules/aix_lvg.py
+++ b/ansible_collections/community/general/plugins/modules/aix_lvg.py
@@ -36,13 +36,13 @@ options:
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group.
- - Required when creating or extending (C(present) state) the volume group.
- - If not informed reducing (C(absent) state) the volume group will be removed.
+ - Required when creating or extending (V(present) state) the volume group.
+      - If not specified when reducing (V(absent) state), the volume group itself will be removed.
type: list
elements: str
state:
description:
- - Control if the volume group exists and volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff).
+      - Control if the volume group exists and the volume group AIX state: varyonvg (V(varyon)) or varyoffvg (V(varyoff)).
type: str
choices: [ absent, present, varyoff, varyon ]
default: present
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvol.py b/ansible_collections/community/general/plugins/modules/aix_lvol.py
index 0a4a6eff5..1e7b42568 100644
--- a/ansible_collections/community/general/plugins/modules/aix_lvol.py
+++ b/ansible_collections/community/general/plugins/modules/aix_lvol.py
@@ -53,15 +53,15 @@ options:
policy:
description:
- Sets the interphysical volume allocation policy.
- - C(maximum) allocates logical partitions across the maximum number of physical volumes.
- - C(minimum) allocates logical partitions across the minimum number of physical volumes.
+ - V(maximum) allocates logical partitions across the maximum number of physical volumes.
+ - V(minimum) allocates logical partitions across the minimum number of physical volumes.
type: str
choices: [ maximum, minimum ]
default: maximum
state:
description:
- - Control if the logical volume exists. If C(present) and the
- volume does not already exist then the C(size) option is required.
+ - Control if the logical volume exists. If V(present) and the
+ volume does not already exist then the O(size) option is required.
type: str
choices: [ absent, present ]
default: present
@@ -72,7 +72,7 @@ options:
default: ''
pvs:
description:
- - A list of physical volumes e.g. C(hdisk1,hdisk2).
+ - A list of physical volumes, for example V(hdisk1,hdisk2).
type: list
elements: str
default: []
diff --git a/ansible_collections/community/general/plugins/modules/alerta_customer.py b/ansible_collections/community/general/plugins/modules/alerta_customer.py
index 120d98932..5e1a5f86c 100644
--- a/ansible_collections/community/general/plugins/modules/alerta_customer.py
+++ b/ansible_collections/community/general/plugins/modules/alerta_customer.py
@@ -58,7 +58,7 @@ options:
state:
description:
- Whether the customer should exist or not.
- - Both I(customer) and I(match) identify a customer that should be added or removed.
+ - Both O(customer) and O(match) identify a customer that should be added or removed.
type: str
choices: [ absent, present ]
default: present
diff --git a/ansible_collections/community/general/plugins/modules/ali_instance.py b/ansible_collections/community/general/plugins/modules/ali_instance.py
index 232c21ee0..087dc64b6 100644
--- a/ansible_collections/community/general/plugins/modules/ali_instance.py
+++ b/ansible_collections/community/general/plugins/modules/ali_instance.py
@@ -51,12 +51,12 @@ options:
type: str
image_id:
description:
- - Image ID used to launch instances. Required when I(state=present) and creating new ECS instances.
+ - Image ID used to launch instances. Required when O(state=present) and creating new ECS instances.
aliases: ['image']
type: str
instance_type:
description:
- - Instance type used to launch instances. Required when I(state=present) and creating new ECS instances.
+ - Instance type used to launch instances. Required when O(state=present) and creating new ECS instances.
aliases: ['type']
type: str
security_groups:
@@ -95,7 +95,7 @@ options:
max_bandwidth_out:
description:
- Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
- Required when I(allocate_public_ip=true). Ignored when I(allocate_public_ip=false).
+ Required when O(allocate_public_ip=true). Ignored when O(allocate_public_ip=false).
default: 0
type: int
host_name:
@@ -134,16 +134,16 @@ options:
type: str
count:
description:
- - The number of the new instance. An integer value which indicates how many instances that match I(count_tag)
+ - The number of the new instance. An integer value which indicates how many instances that match O(count_tag)
should be running. Instances are either created or terminated based on this value.
default: 1
type: int
count_tag:
description:
- - I(count) determines how many instances based on a specific tag criteria should be present.
+ - O(count) determines how many instances based on a specific tag criteria should be present.
This can be expressed in multiple ways and is shown in the EXAMPLES section.
- The specified count_tag must already exist or be passed in as the I(tags) option.
- If it is not specified, it will be replaced by I(instance_name).
+ The specified count_tag must already exist or be passed in as the O(tags) option.
+ If it is not specified, it will be replaced by O(instance_name).
type: str
allocate_public_ip:
description:
@@ -159,7 +159,7 @@ options:
type: str
period:
description:
- - The charge duration of the instance, in months. Required when I(instance_charge_type=PrePaid).
+ - The charge duration of the instance, in months. Required when O(instance_charge_type=PrePaid).
- The valid value are [1-9, 12, 24, 36].
default: 1
type: int
@@ -170,13 +170,13 @@ options:
default: false
auto_renew_period:
description:
- - The duration of the automatic renew the charge of the instance. Required when I(auto_renew=true).
+ - The duration of the automatic renew the charge of the instance. Required when O(auto_renew=true).
choices: [1, 2, 3, 6, 12]
type: int
instance_ids:
description:
- A list of instance ids. It is required when need to operate existing instances.
- If it is specified, I(count) will lose efficacy.
+ If it is specified, O(count) will lose efficacy.
type: list
elements: str
force:
@@ -186,7 +186,7 @@ options:
type: bool
tags:
description:
- - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"})
+ - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. V({"key":"value"})
aliases: ["instance_tags"]
type: dict
version_added: '0.2.0'
@@ -229,7 +229,7 @@ options:
version_added: '0.2.0'
period_unit:
description:
- - The duration unit that you will buy the resource. It is valid when I(instance_charge_type=PrePaid).
+ - The duration unit that you will buy the resource. It is valid when O(instance_charge_type=PrePaid).
choices: ['Month', 'Week']
default: 'Month'
type: str
@@ -237,10 +237,10 @@ options:
dry_run:
description:
- Specifies whether to send a dry-run request.
- - If I(dry_run=true), Only a dry-run request is sent and no instance is created. The system checks whether the
+ - If O(dry_run=true), Only a dry-run request is sent and no instance is created. The system checks whether the
required parameters are set, and validates the request format, service permissions, and available ECS instances.
If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
- - If I(dry_run=false), A request is sent. If the validation succeeds, the instance is created.
+ - If O(dry_run=false), A request is sent. If the validation succeeds, the instance is created.
default: false
type: bool
version_added: '0.2.0'
@@ -253,7 +253,7 @@ options:
author:
- "He Guimin (@xiaozhu36)"
requirements:
- - "python >= 3.6"
+ - "Python >= 3.6"
- "footmark >= 1.19.0"
extends_documentation_fragment:
- community.general.alicloud
diff --git a/ansible_collections/community/general/plugins/modules/ali_instance_info.py b/ansible_collections/community/general/plugins/modules/ali_instance_info.py
index e7ec7f395..d6a787374 100644
--- a/ansible_collections/community/general/plugins/modules/ali_instance_info.py
+++ b/ansible_collections/community/general/plugins/modules/ali_instance_info.py
@@ -31,7 +31,6 @@ short_description: Gather information on instances of Alibaba Cloud ECS
description:
- This module fetches data from the Open API in Alicloud.
The module must be called from within the ECS instance itself.
- - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
attributes:
check_mode:
@@ -53,15 +52,15 @@ options:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
- Filter keys can be same as request parameter name or be lower case and use underscore ("_") or dash ("-") to
- connect different words in one parameter. 'InstanceIds' should be a list.
- 'Tag.n.Key' and 'Tag.n.Value' should be a dict and using I(tags) instead.
+ Filter keys can be same as request parameter name or be lower case and use underscore (V("_")) or dash (V("-")) to
+ connect different words in one parameter. C(InstanceIds) should be a list.
+ C(Tag.n.Key) and C(Tag.n.Value) should be a dict and using O(tags) instead.
type: dict
version_added: '0.2.0'
author:
- "He Guimin (@xiaozhu36)"
requirements:
- - "python >= 3.6"
+ - "Python >= 3.6"
- "footmark >= 1.13.0"
extends_documentation_fragment:
- community.general.alicloud
diff --git a/ansible_collections/community/general/plugins/modules/alternatives.py b/ansible_collections/community/general/plugins/modules/alternatives.py
index 97d4f51fb..0d1b1e8cb 100644
--- a/ansible_collections/community/general/plugins/modules/alternatives.py
+++ b/ansible_collections/community/general/plugins/modules/alternatives.py
@@ -44,21 +44,21 @@ options:
description:
- The path to the symbolic link that should point to the real executable.
- This option is always required on RHEL-based distributions. On Debian-based distributions this option is
- required when the alternative I(name) is unknown to the system.
+ required when the alternative O(name) is unknown to the system.
type: path
priority:
description:
- - The priority of the alternative. If no priority is given for creation C(50) is used as a fallback.
+ - The priority of the alternative. If no priority is given for creation V(50) is used as a fallback.
type: int
state:
description:
- - C(present) - install the alternative (if not already installed), but do
+ - V(present) - install the alternative (if not already installed), but do
not set it as the currently selected alternative for the group.
- - C(selected) - install the alternative (if not already installed), and
+ - V(selected) - install the alternative (if not already installed), and
set it as the currently selected alternative for the group.
- - C(auto) - install the alternative (if not already installed), and
+ - V(auto) - install the alternative (if not already installed), and
set the group to auto mode. Added in community.general 5.1.0.
- - C(absent) - removes the alternative. Added in community.general 5.1.0.
+ - V(absent) - removes the alternative. Added in community.general 5.1.0.
choices: [ present, selected, auto, absent ]
default: selected
type: str
diff --git a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
index 0f38eabdf..3b0a8fd47 100644
--- a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
+++ b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
@@ -17,15 +17,13 @@ version_added: 3.5.0
description:
- This module allows the installation of Ansible collections or roles using C(ansible-galaxy).
notes:
- - >
- B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and
- ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters.
+ - Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0.
- >
The module will try and run using the C(C.UTF-8) locale.
If that fails, it will try C(en_US.UTF-8).
If that one also fails, the module will fail.
requirements:
- - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer
+ - ansible-core 2.11 or newer
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -37,9 +35,8 @@ options:
type:
description:
- The type of installation performed by C(ansible-galaxy).
- - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections.
- - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices."
- - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)."
+ - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections.
+ - "Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three choices."
type: str
choices: [collection, role, both]
required: true
@@ -48,22 +45,21 @@ options:
- Name of the collection or role being installed.
- >
Versions can be specified with C(ansible-galaxy) usual formats.
- For example, the collection C(community.docker:1.6.1) or the role C(ansistrano.deploy,3.8.0).
- - I(name) and I(requirements_file) are mutually exclusive.
+ For example, the collection V(community.docker:1.6.1) or the role V(ansistrano.deploy,3.8.0).
+ - O(name) and O(requirements_file) are mutually exclusive.
type: str
requirements_file:
description:
- Path to a file containing a list of requirements to be installed.
- - It works for I(type) equals to C(collection) and C(role).
- - I(name) and I(requirements_file) are mutually exclusive.
- - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run."
+ - It works for O(type) equals to V(collection) and V(role).
+ - O(name) and O(requirements_file) are mutually exclusive.
type: path
dest:
description:
- - The path to the directory containing your collections or roles, according to the value of I(type).
+ - The path to the directory containing your collections or roles, according to the value of O(type).
- >
- Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file)
- contains both roles and collections and I(dest) is specified.
+ Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file)
+ contains both roles and collections and O(dest) is specified.
type: path
no_deps:
description:
@@ -74,25 +70,17 @@ options:
force:
description:
- Force overwriting an existing role or collection.
- - Using I(force=true) is mandatory when downgrading.
- - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections."
+ - Using O(force=true) is mandatory when downgrading.
type: bool
default: false
ack_ansible29:
description:
- - Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them.
- - This option is completely ignored if using a version of Ansible greater than C(2.9.x).
- - Note that this option will be removed without any further deprecation warning once support
- for Ansible 2.9 is removed from this module.
+      - This option no longer has any effect and will be removed in community.general 9.0.0.
type: bool
default: false
ack_min_ansiblecore211:
description:
- - Acknowledge the module is deprecating support for Ansible 2.9 and ansible-base 2.10.
- - Support for those versions will be removed in community.general 8.0.0.
- At the same time, this option will be removed without any deprecation warning!
- - This option is completely ignored if using a version of ansible-core/ansible-base/Ansible greater than C(2.11).
- - For the sake of conciseness, setting this parameter to C(true) implies I(ack_ansible29=true).
+      - This option no longer has any effect and will be removed in community.general 9.0.0.
type: bool
default: false
"""
@@ -124,30 +112,29 @@ EXAMPLES = """
RETURN = """
type:
- description: The value of the I(type) parameter.
+ description: The value of the O(type) parameter.
type: str
returned: always
name:
- description: The value of the I(name) parameter.
+ description: The value of the O(name) parameter.
type: str
returned: always
dest:
- description: The value of the I(dest) parameter.
+ description: The value of the O(dest) parameter.
type: str
returned: always
requirements_file:
- description: The value of the I(requirements_file) parameter.
+ description: The value of the O(requirements_file) parameter.
type: str
returned: always
force:
- description: The value of the I(force) parameter.
+ description: The value of the O(force) parameter.
type: bool
returned: always
installed_roles:
description:
- - If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path.
- - If I(name) is specified, returns that role name and the version installed per path.
- - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
+ - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path.
+ - If O(name) is specified, returns that role name and the version installed per path.
type: dict
returned: always when installing roles
contains:
@@ -162,9 +149,8 @@ RETURN = """
ansistrano.deploy: 3.8.0
installed_collections:
description:
- - If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path.
- - If I(name) is specified, returns that collection name and the version installed per path.
- - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
+ - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path.
+ - If O(name) is specified, returns that collection name and the version installed per path.
type: dict
returned: always when installing collections
contains:
@@ -206,7 +192,6 @@ class AnsibleGalaxyInstall(ModuleHelper):
_RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
_RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__()
ansible_version = None
- is_ansible29 = None
output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
module = dict(
@@ -217,8 +202,18 @@ class AnsibleGalaxyInstall(ModuleHelper):
dest=dict(type='path'),
force=dict(type='bool', default=False),
no_deps=dict(type='bool', default=False),
- ack_ansible29=dict(type='bool', default=False),
- ack_min_ansiblecore211=dict(type='bool', default=False),
+ ack_ansible29=dict(
+ type='bool',
+ default=False,
+ removed_in_version='9.0.0',
+ removed_from_collection='community.general',
+ ),
+ ack_min_ansiblecore211=dict(
+ type='bool',
+ default=False,
+ removed_in_version='9.0.0',
+ removed_from_collection='community.general',
+ ),
),
mutually_exclusive=[('name', 'requirements_file')],
required_one_of=[('name', 'requirements_file')],
@@ -268,26 +263,22 @@ class AnsibleGalaxyInstall(ModuleHelper):
def __init_module__(self):
# self.runner = CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=self.force_lang)
self.runner, self.ansible_version = self._get_ansible_galaxy_version()
- if self.ansible_version < (2, 11) and not self.vars.ack_min_ansiblecore211:
- self.module.deprecate(
- "Support for Ansible 2.9 and ansible-base 2.10 is being deprecated. "
- "At the same time support for them is ended, also the ack_ansible29 option will be removed. "
- "Upgrading is strongly recommended, or set 'ack_min_ansiblecore211' to suppress this message.",
- version="8.0.0",
- collection_name="community.general",
+ if self.ansible_version < (2, 11):
+ self.module.fail_json(
+                msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed."
)
- self.is_ansible29 = self.ansible_version < (2, 10)
- if self.is_ansible29:
- self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing '(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)'.*"
- r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)'
- r' was installed successfully)$')
- else:
- # Collection install output changed:
- # ansible-base 2.10: "coll.name (x.y.z)"
- # ansible-core 2.11+: "coll.name:x.y.z"
- self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
- r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
- r' was installed successfully$')
+ # Collection install output changed:
+ # ansible-base 2.10: "coll.name (x.y.z)"
+ # ansible-core 2.11+: "coll.name:x.y.z"
+ self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
+ r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
+ r' was installed successfully$')
+ self.vars.set("new_collections", {}, change=True)
+ self.vars.set("new_roles", {}, change=True)
+ if self.vars.type != "collection":
+ self.vars.installed_roles = self._list_roles()
+ if self.vars.type != "roles":
+ self.vars.installed_collections = self._list_collections()
def _list_element(self, _type, path_re, elem_re):
def process(rc, out, err):
@@ -322,24 +313,8 @@ class AnsibleGalaxyInstall(ModuleHelper):
def _list_roles(self):
return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)
- def _setup29(self):
- self.vars.set("new_collections", {})
- self.vars.set("new_roles", {})
- self.vars.set("ansible29_change", False, change=True, output=False)
- if not (self.vars.ack_ansible29 or self.vars.ack_min_ansiblecore211):
- self.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed")
- if self.vars.requirements_file is not None and self.vars.type == 'both':
- self.warn("Ansible 2.9 or older: will install only roles from requirement files")
-
- def _setup210plus(self):
- self.vars.set("new_collections", {}, change=True)
- self.vars.set("new_roles", {}, change=True)
- if self.vars.type != "collection":
- self.vars.installed_roles = self._list_roles()
- if self.vars.type != "roles":
- self.vars.installed_collections = self._list_collections()
-
def __run__(self):
+
def process(rc, out, err):
for line in out.splitlines():
match = self._RE_INSTALL_OUTPUT.match(line)
@@ -347,19 +322,9 @@ class AnsibleGalaxyInstall(ModuleHelper):
continue
if match.group("collection"):
self.vars.new_collections[match.group("collection")] = match.group("cversion")
- if self.is_ansible29:
- self.vars.ansible29_change = True
elif match.group("role"):
self.vars.new_roles[match.group("role")] = match.group("rversion")
- if self.is_ansible29:
- self.vars.ansible29_change = True
-
- if self.is_ansible29:
- if self.vars.type == 'both':
- raise ValueError("Type 'both' not supported in Ansible 2.9")
- self._setup29()
- else:
- self._setup210plus()
+
with self.runner("type galaxy_cmd force no_deps dest requirements_file name", output_process=process) as ctx:
ctx.run(galaxy_cmd="install")
if self.verbosity > 2:
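As a quick sanity check on the single install-output pattern the module now uses for every supported ansible-core version, the snippet below (sample lines are illustrative, not taken from the module's tests) exercises both the collection and the role forms it is meant to match:

    import re

    # Same pattern the module now applies unconditionally (the 2.9 branch is gone).
    RE_INSTALL_OUTPUT = re.compile(
        r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
        r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
        r' was installed successfully$'
    )

    samples = [
        "community.docker:3.4.0 was installed successfully",        # ansible-core 2.11+
        "community.docker (3.4.0) was installed successfully",      # ansible-base 2.10
        "- geerlingguy.docker (6.1.0) was installed successfully",  # role install
    ]

    for line in samples:
        match = RE_INSTALL_OUTPUT.match(line)
        if match is None:
            continue
        if match.group("collection"):
            print("collection:", match.group("collection"), match.group("cversion"))
        else:
            print("role:", match.group("role"), match.group("rversion"))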
diff --git a/ansible_collections/community/general/plugins/modules/apache2_module.py b/ansible_collections/community/general/plugins/modules/apache2_module.py
index 2e2456d74..a9fd72b24 100644
--- a/ansible_collections/community/general/plugins/modules/apache2_module.py
+++ b/ansible_collections/community/general/plugins/modules/apache2_module.py
@@ -37,7 +37,7 @@ options:
description:
- Identifier of the module as listed by C(apache2ctl -M).
This is optional and usually determined automatically by the common convention of
- appending C(_module) to I(name) as well as custom exception for popular modules.
+        appending V(_module) to O(name) as well as custom exceptions for popular modules.
required: false
force:
description:
@@ -154,7 +154,7 @@ def _get_ctl_binary(module):
if ctl_binary is not None:
return ctl_binary
- module.fail_json(msg="Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.")
+    module.fail_json(msg="Neither apache2ctl nor apachectl found. At least one apache control binary is necessary.")
def _module_is_enabled(module):
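The convention mentioned in the option description above (the identifier defaults to the module name with C(_module) appended, plus a few special cases) can be sketched as follows; the exception entries here are illustrative placeholders, not the module's actual lookup table:

    # Hypothetical sketch of the name -> identifier convention described above.
    IDENTIFIER_EXCEPTIONS = {          # illustrative entries only
        "shib2": "mod_shib",
        "evasive": "evasive20_module",
    }

    def default_identifier(name):
        """Guess the identifier reported by `apache2ctl -M` for a module name."""
        return IDENTIFIER_EXCEPTIONS.get(name, name + "_module")

    assert default_identifier("rewrite") == "rewrite_module"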
diff --git a/ansible_collections/community/general/plugins/modules/apk.py b/ansible_collections/community/general/plugins/modules/apk.py
index e56b2165d..a6b058b93 100644
--- a/ansible_collections/community/general/plugins/modules/apk.py
+++ b/ansible_collections/community/general/plugins/modules/apk.py
@@ -17,7 +17,7 @@ DOCUMENTATION = '''
module: apk
short_description: Manages apk packages
description:
- - Manages I(apk) packages for Alpine Linux.
+ - Manages C(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
extends_documentation_fragment:
- community.general.attributes
@@ -35,7 +35,9 @@ options:
default: false
name:
description:
- - A package name, like C(foo), or multiple packages, like C(foo, bar).
+ - A package name, like V(foo), or multiple packages, like V(foo,bar).
+ - Do not include additional whitespace when specifying multiple packages as a string.
+ Prefer YAML lists over comma-separating multiple package names.
type: list
elements: str
no_cache:
@@ -53,15 +55,15 @@ options:
state:
description:
- Indicates the desired package(s) state.
- - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias.
- - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias.
- - C(latest) ensures the package(s) is/are present and the latest version(s).
+ - V(present) ensures the package(s) is/are present. V(installed) can be used as an alias.
+ - V(absent) ensures the package(s) is/are absent. V(removed) can be used as an alias.
+ - V(latest) ensures the package(s) is/are present and the latest version(s).
default: present
choices: [ "present", "absent", "latest", "installed", "removed" ]
type: str
update_cache:
description:
- - Update repository indexes. Can be run with other steps or on it's own.
+ - Update repository indexes. Can be run with other steps or on its own.
type: bool
default: false
upgrade:
@@ -76,8 +78,8 @@ options:
default: /etc/apk/world
version_added: 5.4.0
notes:
- - 'I(name) and I(upgrade) are mutually exclusive.'
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option.
+ - 'O(name) and O(upgrade) are mutually exclusive.'
+  - When used with a C(loop:), each package will be processed individually; it is much more efficient to pass the list directly to the O(name) option.
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/apt_repo.py b/ansible_collections/community/general/plugins/modules/apt_repo.py
index 556039027..4c82587d0 100644
--- a/ansible_collections/community/general/plugins/modules/apt_repo.py
+++ b/ansible_collections/community/general/plugins/modules/apt_repo.py
@@ -41,7 +41,7 @@ options:
remove_others:
description:
      - Remove other than added repositories
- - Used if I(state=present)
+ - Used if O(state=present)
type: bool
default: false
update:
diff --git a/ansible_collections/community/general/plugins/modules/apt_rpm.py b/ansible_collections/community/general/plugins/modules/apt_rpm.py
index 8749086bb..de1b57411 100644
--- a/ansible_collections/community/general/plugins/modules/apt_rpm.py
+++ b/ansible_collections/community/general/plugins/modules/apt_rpm.py
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
module: apt_rpm
short_description: APT-RPM package manager
description:
- - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required.
+  - Manages packages with C(apt-rpm). Both low-level (C(rpm)) and high-level (C(apt-get)) package manager binaries are required.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -28,6 +28,9 @@ options:
package:
description:
- List of packages to install, upgrade, or remove.
+      - Since community.general 8.0.0, may include paths to local C(.rpm) files
+        if O(state=installed) or O(state=present). This requires the C(rpm) python
+        module.
aliases: [ name, pkg ]
type: list
elements: str
@@ -63,6 +66,9 @@ options:
type: bool
default: false
version_added: 6.5.0
+requirements:
+  - C(rpm) python package (rpm bindings), optional. Required if the O(package)
+    option includes local files.
author:
- Evgenii Terechkov (@evgkrsk)
'''
@@ -109,15 +115,48 @@ EXAMPLES = '''
'''
import os
-
-from ansible.module_utils.basic import AnsibleModule
-
+import re
+import traceback
+
+from ansible.module_utils.basic import (
+ AnsibleModule,
+ missing_required_lib,
+)
+from ansible.module_utils.common.text.converters import to_native
+
+try:
+ import rpm
+except ImportError:
+ HAS_RPM_PYTHON = False
+ RPM_PYTHON_IMPORT_ERROR = traceback.format_exc()
+else:
+ HAS_RPM_PYTHON = True
+ RPM_PYTHON_IMPORT_ERROR = None
+
+APT_CACHE = "/usr/bin/apt-cache"
APT_PATH = "/usr/bin/apt-get"
RPM_PATH = "/usr/bin/rpm"
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
UPDATE_KERNEL_ZERO = "\nTry to install new kernel "
+def local_rpm_package_name(path):
+ """return package name of a local rpm passed in.
+ Inspired by ansible.builtin.yum"""
+
+ ts = rpm.TransactionSet()
+ ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
+ fd = os.open(path, os.O_RDONLY)
+ try:
+ header = ts.hdrFromFdno(fd)
+ except rpm.error as e:
+ return None
+ finally:
+ os.close(fd)
+
+ return to_native(header[rpm.RPMTAG_NAME])
+
+
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
@@ -128,11 +167,38 @@ def query_package(module, name):
return False
+def check_package_version(module, name):
+ # compare installed and candidate version
+ # if newest version already installed return True
+ # otherwise return False
+
+ rc, out, err = module.run_command([APT_CACHE, "policy", name], environ_update={"LANG": "C"})
+ installed = re.split("\n |: ", out)[2]
+ candidate = re.split("\n |: ", out)[4]
+ if installed >= candidate:
+ return True
+ return False
+
+
def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
+ if name.endswith('.rpm'):
+ # Likely a local RPM file
+ if not HAS_RPM_PYTHON:
+ module.fail_json(
+ msg=missing_required_lib('rpm'),
+ exception=RPM_PYTHON_IMPORT_ERROR,
+ )
+
+ name = local_rpm_package_name(name)
+
rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
- return rc == 0
+    if rc == 0:
+        if check_package_version(module, name):
+            return True
+    return False
def update_package_db(module):
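The version comparison added above leans on the positional layout of C(apt-cache policy) output after splitting on newline-plus-space or colon-plus-space. A standalone illustration of that parsing, using a made-up output block (package name and versions are assumptions, not real apt-rpm output):

    import re

    # Assumed shape of `apt-cache policy <name>` output.
    out = (
        "vim:\n"
        "  Installed: 2:8.2.3995-1\n"
        "  Candidate: 2:8.2.3995-1\n"
        "  Version table:\n"
    )

    tokens = re.split("\n |: ", out)
    installed = tokens[2]   # "2:8.2.3995-1"
    candidate = tokens[4]   # "2:8.2.3995-1"
    # The module compares the two version strings lexically, as shown here.
    print(installed >= candidate)   # True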
diff --git a/ansible_collections/community/general/plugins/modules/archive.py b/ansible_collections/community/general/plugins/modules/archive.py
index 8748fb8a3..6784aa1ac 100644
--- a/ansible_collections/community/general/plugins/modules/archive.py
+++ b/ansible_collections/community/general/plugins/modules/archive.py
@@ -20,7 +20,7 @@ extends_documentation_fragment:
description:
- Creates or extends an archive.
- The source and archive are on the remote host, and the archive I(is not) copied to the local host.
- - Source files can be deleted after archival by specifying I(remove=True).
+ - Source files can be deleted after archival by specifying O(remove=True).
attributes:
check_mode:
support: full
@@ -36,27 +36,26 @@ options:
format:
description:
- The type of compression to use.
- - Support for xz was added in Ansible 2.5.
type: str
choices: [ bz2, gz, tar, xz, zip ]
default: gz
dest:
description:
- The file name of the destination archive. The parent directory must exists on the remote host.
- - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+ - This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
- If the destination archive already exists, it will be truncated and overwritten.
type: path
exclude_path:
description:
- - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion.
- - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list.
+ - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from O(path) list and glob expansion.
+ - Use O(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the O(path) list.
type: list
elements: path
default: []
exclusion_patterns:
description:
- Glob style patterns to exclude files or directories from the resulting archive.
- - This differs from I(exclude_path) which applies only to the source paths from I(path).
+ - This differs from O(exclude_path) which applies only to the source paths from O(path).
type: list
elements: path
version_added: 3.2.0
@@ -73,7 +72,7 @@ options:
type: bool
default: false
notes:
- - Can produce I(gzip), I(bzip2), I(lzma), and I(zip) compressed files or archives.
+ - Can produce C(gzip), C(bzip2), C(lzma), and C(zip) compressed files or archives.
- This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives.
These are part of the Python standard library for Python 2 and 3.
requirements:
@@ -144,16 +143,16 @@ EXAMPLES = r'''
RETURN = r'''
state:
description:
- The state of the input C(path).
+ The state of the input O(path).
type: str
returned: always
dest_state:
description:
- - The state of the I(dest) file.
- - C(absent) when the file does not exist.
- - C(archive) when the file is an archive.
- - C(compress) when the file is compressed, but not an archive.
- - C(incomplete) when the file is an archive, but some files under I(path) were not found.
+ - The state of the O(dest) file.
+ - V(absent) when the file does not exist.
+ - V(archive) when the file is an archive.
+ - V(compress) when the file is compressed, but not an archive.
+ - V(incomplete) when the file is an archive, but some files under O(path) were not found.
type: str
returned: success
version_added: 3.4.0
diff --git a/ansible_collections/community/general/plugins/modules/atomic_container.py b/ansible_collections/community/general/plugins/modules/atomic_container.py
index c26510296..d1567c892 100644
--- a/ansible_collections/community/general/plugins/modules/atomic_container.py
+++ b/ansible_collections/community/general/plugins/modules/atomic_container.py
@@ -21,7 +21,6 @@ notes:
- Host should support C(atomic) command
requirements:
- atomic
- - "python >= 2.6"
extends_documentation_fragment:
- community.general.attributes
attributes:
diff --git a/ansible_collections/community/general/plugins/modules/atomic_host.py b/ansible_collections/community/general/plugins/modules/atomic_host.py
index bb44c4489..ebb74caf1 100644
--- a/ansible_collections/community/general/plugins/modules/atomic_host.py
+++ b/ansible_collections/community/general/plugins/modules/atomic_host.py
@@ -21,7 +21,6 @@ notes:
- Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
requirements:
- atomic
- - python >= 2.6
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -33,7 +32,7 @@ options:
revision:
description:
- The version number of the atomic host to be deployed.
- - Providing C(latest) will upgrade to the latest available version.
+ - Providing V(latest) will upgrade to the latest available version.
default: 'latest'
aliases: [ version ]
type: str
diff --git a/ansible_collections/community/general/plugins/modules/atomic_image.py b/ansible_collections/community/general/plugins/modules/atomic_image.py
index 65aec1e9d..4bd15e27a 100644
--- a/ansible_collections/community/general/plugins/modules/atomic_image.py
+++ b/ansible_collections/community/general/plugins/modules/atomic_image.py
@@ -21,7 +21,6 @@ notes:
- Host should support C(atomic) command.
requirements:
- atomic
- - python >= 2.6
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -43,7 +42,7 @@ options:
state:
description:
- The state of the container image.
- - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
+ - The state V(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
choices: [ 'absent', 'latest', 'present' ]
default: 'latest'
type: str
diff --git a/ansible_collections/community/general/plugins/modules/awall.py b/ansible_collections/community/general/plugins/modules/awall.py
index da1b29f70..f3c2384b5 100644
--- a/ansible_collections/community/general/plugins/modules/awall.py
+++ b/ansible_collections/community/general/plugins/modules/awall.py
@@ -16,7 +16,7 @@ short_description: Manage awall policies
author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
description:
  - This module allows for enable/disable/activate of C(awall) policies.
- - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
+ - Alpine Wall (C(awall)) generates a firewall configuration from the enabled policy files
and activates the configuration on the system.
extends_documentation_fragment:
- community.general.attributes
@@ -41,11 +41,11 @@ options:
description:
- Activate the new firewall rules.
- Can be run with other steps or on its own.
- - Idempotency is affected if I(activate=true), as the module will always report a changed state.
+ - Idempotency is affected if O(activate=true), as the module will always report a changed state.
type: bool
default: false
notes:
- - At least one of I(name) and I(activate) is required.
+ - At least one of O(name) and O(activate) is required.
'''
EXAMPLES = r'''
diff --git a/ansible_collections/community/general/plugins/modules/bearychat.py b/ansible_collections/community/general/plugins/modules/bearychat.py
index 28f1f8fcd..f52737fac 100644
--- a/ansible_collections/community/general/plugins/modules/bearychat.py
+++ b/ansible_collections/community/general/plugins/modules/bearychat.py
@@ -27,7 +27,7 @@ options:
description:
- BearyChat WebHook URL. This authenticates you to the bearychat
service. It looks like
- C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
+ V(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
required: true
text:
type: str
@@ -35,14 +35,14 @@ options:
- Message to send.
markdown:
description:
- - If C(true), text will be parsed as markdown.
+ - If V(true), text will be parsed as markdown.
default: true
type: bool
channel:
type: str
description:
- Channel to send the message to. If absent, the message goes to the
- default channel selected by the I(url).
+ default channel selected by the O(url).
attachments:
type: list
elements: dict
diff --git a/ansible_collections/community/general/plugins/modules/bigpanda.py b/ansible_collections/community/general/plugins/modules/bigpanda.py
index bab200bc4..7bde5fc1d 100644
--- a/ansible_collections/community/general/plugins/modules/bigpanda.py
+++ b/ansible_collections/community/general/plugins/modules/bigpanda.py
@@ -72,10 +72,10 @@ options:
description:
- Base URL of the API server.
required: false
- default: https://api.bigpanda.io
+ default: "https://api.bigpanda.io"
validate_certs:
description:
- - If C(false), SSL certificates for the target url will not be validated. This should only be used
+ - If V(false), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: true
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
index 5ef199f7a..29c19b8b3 100644
--- a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
@@ -33,7 +33,7 @@ options:
workspace:
description:
- The repository owner.
- - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
key:
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
index d39c054b1..3bc41c298 100644
--- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
@@ -33,7 +33,7 @@ options:
workspace:
description:
- The repository owner.
- - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
public_key:
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
index 28ff48739..3e6c4bfbf 100644
--- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
@@ -14,7 +14,7 @@ module: bitbucket_pipeline_known_host
short_description: Manages Bitbucket pipeline known hosts
description:
- Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
- - The host fingerprint will be retrieved automatically, but in case of an error, one can use I(key) field to specify it manually.
+ - The host fingerprint will be retrieved automatically, but in case of an error, one can use O(key) field to specify it manually.
author:
- Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
@@ -36,7 +36,7 @@ options:
workspace:
description:
- The repository owner.
- - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
name:
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
index eac0d18dd..1ff8e4375 100644
--- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
@@ -33,7 +33,7 @@ options:
workspace:
description:
- The repository owner.
- - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ - "B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user)."
type: str
required: true
name:
@@ -58,7 +58,7 @@ options:
choices: [ absent, present ]
notes:
- Check mode is supported.
- - For secured values return parameter C(changed) is always C(True).
+ - For secured values return parameter C(changed) is always V(true).
'''
EXAMPLES = r'''
diff --git a/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
index cd2ac6f97..864bb65a6 100644
--- a/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
+++ b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
@@ -23,7 +23,7 @@ options:
default: false
default:
description:
- - Make the subvolume specified by I(name) the filesystem's default subvolume.
+ - Make the subvolume specified by O(name) the filesystem's default subvolume.
type: bool
default: false
filesystem_device:
@@ -49,7 +49,7 @@ options:
recursive:
description:
- When true, indicates that parent/child subvolumes should be created/removedas necessary
- to complete the operation (for I(state=present) and I(state=absent) respectively).
+ to complete the operation (for O(state=present) and O(state=absent) respectively).
type: bool
default: false
snapshot_source:
@@ -60,11 +60,11 @@ options:
snapshot_conflict:
description:
- Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
- - C(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
+ - V(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
- - C(clobber) - If a subvolume already exists at the requested location, delete it first.
+ - V(clobber) - If a subvolume already exists at the requested location, delete it first.
This option is not idempotent and will result in a new snapshot being generated on every execution.
- - C(error) - If a subvolume already exists at the requested location, return an error.
+ - V(error) - If a subvolume already exists at the requested location, return an error.
This option is not idempotent and will result in an error on replay of the module.
type: str
choices: [ skip, clobber, error ]
@@ -77,7 +77,7 @@ options:
default: present
notes:
- - If any or all of the options I(filesystem_device), I(filesystem_label) or I(filesystem_uuid) parameters are provided, there is expected
+  - If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) are provided, there is expected
to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or only a single
btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.
@@ -201,7 +201,7 @@ modifications:
target_subvolume_id:
description:
- - The ID of the subvolume specified with the I(name) parameter, either pre-existing or created as part of module execution.
+ - The ID of the subvolume specified with the O(name) parameter, either pre-existing or created as part of module execution.
type: int
sample: 257
returned: Success and subvolume exists after module execution
diff --git a/ansible_collections/community/general/plugins/modules/bundler.py b/ansible_collections/community/general/plugins/modules/bundler.py
index 682dd334a..59f10800c 100644
--- a/ansible_collections/community/general/plugins/modules/bundler.py
+++ b/ansible_collections/community/general/plugins/modules/bundler.py
@@ -30,7 +30,7 @@ options:
state:
type: str
description:
- - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version
+ - The desired state of the Gem bundle. V(latest) updates gems to the most recent, acceptable version
choices: [present, latest]
default: present
chdir:
@@ -44,19 +44,19 @@ options:
elements: str
description:
- A list of Gemfile groups to exclude during operations. This only
- applies when state is C(present). Bundler considers this
+ applies when O(state=present). Bundler considers this
a 'remembered' property for the Gemfile and will automatically exclude
- groups in future operations even if C(exclude_groups) is not set
+ groups in future operations even if O(exclude_groups) is not set
clean:
description:
- - Only applies if state is C(present). If set removes any gems on the
+      - Only applies if O(state=present). If set, removes any gems on the
target host that are not in the gemfile
type: bool
default: false
gemfile:
type: path
description:
- - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ - Only applies if O(state=present). The path to the gemfile to use to install gems.
- If not specified it will default to the Gemfile in current directory
local:
description:
@@ -65,31 +65,31 @@ options:
default: false
deployment_mode:
description:
- - Only applies if state is C(present). If set it will install gems in
+      - Only applies if O(state=present). If set, it will install gems in
./vendor/bundle instead of the default location. Requires a Gemfile.lock
file to have been created prior
type: bool
default: false
user_install:
description:
- - Only applies if state is C(present). Installs gems in the local user's cache or for all users
+ - Only applies if O(state=present). Installs gems in the local user's cache or for all users
type: bool
default: true
gem_path:
type: path
description:
- - Only applies if state is C(present). Specifies the directory to
- install the gems into. If C(chdir) is set then this path is relative to
- C(chdir)
+ - Only applies if O(state=present). Specifies the directory to
+ install the gems into. If O(chdir) is set then this path is relative to
+ O(chdir)
- If not specified the default RubyGems gem paths will be used.
binstub_directory:
type: path
description:
- - Only applies if state is C(present). Specifies the directory to
+ - Only applies if O(state=present). Specifies the directory to
install any gem bins files to. When executed the bin files will run
within the context of the Gemfile and fail if any required gem
- dependencies are not installed. If C(chdir) is set then this path is
- relative to C(chdir)
+ dependencies are not installed. If O(chdir) is set then this path is
+ relative to O(chdir)
extra_args:
type: str
description:
diff --git a/ansible_collections/community/general/plugins/modules/bzr.py b/ansible_collections/community/general/plugins/modules/bzr.py
index e7aca7c6b..5a60d765c 100644
--- a/ansible_collections/community/general/plugins/modules/bzr.py
+++ b/ansible_collections/community/general/plugins/modules/bzr.py
@@ -16,7 +16,7 @@ author:
- André Paramés (@andreparames)
short_description: Deploy software (or files) from bzr branches
description:
- - Manage I(bzr) branches to deploy files or software.
+ - Manage C(bzr) branches to deploy files or software.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -44,9 +44,8 @@ options:
type: str
force:
description:
- - If C(true), any modified files in the working
- tree will be discarded. Before 1.9 the default
- value was C(true).
+ - If V(true), any modified files in the working
+ tree will be discarded.
type: bool
default: false
executable:
diff --git a/ansible_collections/community/general/plugins/modules/capabilities.py b/ansible_collections/community/general/plugins/modules/capabilities.py
index 9b72ac6ea..a0b6d5222 100644
--- a/ansible_collections/community/general/plugins/modules/capabilities.py
+++ b/ansible_collections/community/general/plugins/modules/capabilities.py
@@ -30,7 +30,7 @@ options:
aliases: [ key ]
capability:
description:
- - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
+ - Desired capability to set (with operator and flags, if O(state=present)) or remove (if O(state=absent))
type: str
required: true
aliases: [ cap ]
diff --git a/ansible_collections/community/general/plugins/modules/cargo.py b/ansible_collections/community/general/plugins/modules/cargo.py
index 24be43741..ba9c05ed7 100644
--- a/ansible_collections/community/general/plugins/modules/cargo.py
+++ b/ansible_collections/community/general/plugins/modules/cargo.py
@@ -25,6 +25,12 @@ attributes:
diff_mode:
support: none
options:
+ executable:
+ description:
+ - Path to the C(cargo) installed in the system.
+      - If not specified, the module will look for C(cargo) in E(PATH).
+ type: path
+ version_added: 7.5.0
name:
description:
- The name of a Rust package to install.
@@ -35,15 +41,23 @@ options:
description:
->
The base path where to install the Rust packages. Cargo automatically appends
- C(/bin). In other words, C(/usr/local) will become C(/usr/local/bin).
+ V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).
type: path
version:
description:
->
- The version to install. If I(name) contains multiple values, the module will
+ The version to install. If O(name) contains multiple values, the module will
try to install all of them in this version.
type: str
required: false
+ locked:
+ description:
+ - Install with locked dependencies.
+ - This is only used when installing packages.
+ required: false
+ type: bool
+ default: false
+ version_added: 7.5.0
state:
description:
- The state of the Rust package.
@@ -52,7 +66,7 @@ options:
default: present
choices: [ "present", "absent", "latest" ]
requirements:
- - cargo installed in bin path (recommended /usr/local/bin)
+ - cargo installed
"""
EXAMPLES = r"""
@@ -60,6 +74,11 @@ EXAMPLES = r"""
community.general.cargo:
name: ludusavi
+- name: Install "ludusavi" Rust package with locked dependencies
+ community.general.cargo:
+ name: ludusavi
+ locked: true
+
- name: Install "ludusavi" Rust package in version 0.10.0
community.general.cargo:
name: ludusavi
@@ -90,12 +109,12 @@ from ansible.module_utils.basic import AnsibleModule
class Cargo(object):
def __init__(self, module, **kwargs):
self.module = module
+ self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)]
self.name = kwargs["name"]
self.path = kwargs["path"]
self.state = kwargs["state"]
self.version = kwargs["version"]
-
- self.executable = [module.get_bin_path("cargo", True)]
+ self.locked = kwargs["locked"]
@property
def path(self):
@@ -118,6 +137,10 @@ class Cargo(object):
def get_installed(self):
cmd = ["install", "--list"]
+ if self.path:
+ cmd.append("--root")
+ cmd.append(self.path)
+
data, dummy = self._exec(cmd, True, False, False)
package_regex = re.compile(r"^([\w\-]+) v(.+):$")
@@ -132,6 +155,8 @@ class Cargo(object):
def install(self, packages=None):
cmd = ["install"]
cmd.extend(packages or self.name)
+ if self.locked:
+ cmd.append("--locked")
if self.path:
cmd.append("--root")
cmd.append(self.path)
@@ -160,15 +185,16 @@ class Cargo(object):
def main():
arg_spec = dict(
+ executable=dict(default=None, type="path"),
name=dict(required=True, type="list", elements="str"),
path=dict(default=None, type="path"),
state=dict(default="present", choices=["present", "absent", "latest"]),
version=dict(default=None, type="str"),
+ locked=dict(default=False, type="bool"),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params["name"]
- path = module.params["path"]
state = module.params["state"]
version = module.params["version"]
@@ -180,7 +206,7 @@ def main():
LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
)
- cargo = Cargo(module, name=name, path=path, state=state, version=version)
+ cargo = Cargo(module, **module.params)
changed, out, err = False, None, None
installed_packages = cargo.get_installed()
if state == "present":
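The effect of the new O(executable) and O(locked) parameters on the generated command line can be seen in isolation with a rough reconstruction of how C(Cargo.install()) assembles its arguments (a sketch only, not the module's actual class):

    def build_install_cmd(executable, packages, path=None, locked=False):
        """Rough sketch of the command assembled by Cargo.install()."""
        cmd = [executable or "cargo", "install"]
        cmd.extend(packages)
        if locked:
            cmd.append("--locked")
        if path:
            cmd.extend(["--root", path])
        return cmd

    print(build_install_cmd(None, ["ludusavi"], path="/opt/cargo", locked=True))
    # ['cargo', 'install', 'ludusavi', '--locked', '--root', '/opt/cargo']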
diff --git a/ansible_collections/community/general/plugins/modules/catapult.py b/ansible_collections/community/general/plugins/modules/catapult.py
index a3bbef6c4..acd839851 100644
--- a/ansible_collections/community/general/plugins/modules/catapult.py
+++ b/ansible_collections/community/general/plugins/modules/catapult.py
@@ -28,13 +28,13 @@ options:
src:
type: str
description:
- - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
+ - One of your catapult telephone numbers the message should come from (must be in E.164 format, like V(+19195551212)).
required: true
dest:
type: list
elements: str
description:
- - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
+ - The phone number or numbers the message should be sent to (must be in E.164 format, like V(+19195551212)).
required: true
msg:
type: str
diff --git a/ansible_collections/community/general/plugins/modules/circonus_annotation.py b/ansible_collections/community/general/plugins/modules/circonus_annotation.py
index 937610776..f3b94a052 100644
--- a/ansible_collections/community/general/plugins/modules/circonus_annotation.py
+++ b/ansible_collections/community/general/plugins/modules/circonus_annotation.py
@@ -52,12 +52,12 @@ options:
type: int
description:
- Unix timestamp of event start
- - If not specified, it defaults to I(now).
+ - If not specified, it defaults to "now".
stop:
type: int
description:
- Unix timestamp of event end
- - If not specified, it defaults to I(now) + I(duration).
+ - If not specified, it defaults to "now" + O(duration).
duration:
type: int
description:
diff --git a/ansible_collections/community/general/plugins/modules/cisco_webex.py b/ansible_collections/community/general/plugins/modules/cisco_webex.py
index 2e5cb50ea..caa77f576 100644
--- a/ansible_collections/community/general/plugins/modules/cisco_webex.py
+++ b/ansible_collections/community/general/plugins/modules/cisco_webex.py
@@ -17,7 +17,7 @@ description:
- Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
author: Drew Rusell (@drew-russell)
notes:
- - The C(recipient_id) type must be valid for the supplied C(recipient_id).
+ - The O(recipient_type) must be valid for the supplied O(recipient_id).
- Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
extends_documentation_fragment:
@@ -40,7 +40,7 @@ options:
recipient_id:
description:
- - The unique identifier associated with the supplied C(recipient_type).
+ - The unique identifier associated with the supplied O(recipient_type).
required: true
type: str
diff --git a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
index c832571d3..b30037c6f 100644
--- a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
+++ b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
@@ -49,7 +49,7 @@ options:
description:
- The list of ports associated with the policy.
TCP and UDP can take in single ports or port ranges.
- - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
+ - "Example: V(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
type: list
elements: str
firewall_policy_id:
diff --git a/ansible_collections/community/general/plugins/modules/clc_server.py b/ansible_collections/community/general/plugins/modules/clc_server.py
index d2d019ff0..6bfe5a9b9 100644
--- a/ansible_collections/community/general/plugins/modules/clc_server.py
+++ b/ansible_collections/community/general/plugins/modules/clc_server.py
@@ -1501,7 +1501,7 @@ class ClcServer:
return aa_policy_id
#
- # This is the function that gets patched to the Request.server object using a lamda closure
+ # This is the function that gets patched to the Request.server object using a lambda closure
#
@staticmethod
diff --git a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
index 8f45fcef3..d2bea4266 100644
--- a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
+++ b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
@@ -13,8 +13,6 @@ DOCUMENTATION = r'''
module: cloudflare_dns
author:
- Michael Gruener (@mgruener)
-requirements:
- - python >= 2.6
short_description: Manage Cloudflare DNS records
description:
- "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)."
@@ -31,7 +29,7 @@ options:
- API token.
- Required for api token authentication.
- "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
- - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0.
+ - Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0.
type: str
required: false
version_added: '0.2.0'
@@ -51,41 +49,54 @@ options:
algorithm:
description:
- Algorithm number.
- - Required for I(type=DS) and I(type=SSHFP) when I(state=present).
+ - Required for O(type=DS) and O(type=SSHFP) when O(state=present).
type: int
cert_usage:
description:
- Certificate usage number.
- - Required for I(type=TLSA) when I(state=present).
+ - Required for O(type=TLSA) when O(state=present).
type: int
choices: [ 0, 1, 2, 3 ]
+ flag:
+ description:
+ - Issuer Critical Flag.
+ - Required for O(type=CAA) when O(state=present).
+ type: int
+ choices: [ 0, 1 ]
+ version_added: 8.0.0
+ tag:
+ description:
+ - CAA issue restriction.
+ - Required for O(type=CAA) when O(state=present).
+ type: str
+ choices: [ issue, issuewild, iodef ]
+ version_added: 8.0.0
hash_type:
description:
- Hash type number.
- - Required for I(type=DS), I(type=SSHFP) and I(type=TLSA) when I(state=present).
+ - Required for O(type=DS), O(type=SSHFP) and O(type=TLSA) when O(state=present).
type: int
choices: [ 1, 2 ]
key_tag:
description:
- DNSSEC key tag.
- - Needed for I(type=DS) when I(state=present).
+ - Needed for O(type=DS) when O(state=present).
type: int
port:
description:
- Service port.
- - Required for I(type=SRV) and I(type=TLSA).
+ - Required for O(type=SRV) and O(type=TLSA).
type: int
priority:
description:
- Record priority.
- - Required for I(type=MX) and I(type=SRV)
+ - Required for O(type=MX) and O(type=SRV)
default: 1
type: int
proto:
description:
- - Service protocol. Required for I(type=SRV) and I(type=TLSA).
+ - Service protocol. Required for O(type=SRV) and O(type=TLSA).
- Common values are TCP and UDP.
- - Before Ansible 2.6 only TCP and UDP were available.
type: str
proxied:
description:
@@ -95,26 +106,26 @@ options:
record:
description:
- Record to add.
- - Required if I(state=present).
- - Default is C(@) (e.g. the zone name).
+ - Required if O(state=present).
+ - Default is V(@) (that is, the zone name).
type: str
default: '@'
aliases: [ name ]
selector:
description:
- Selector number.
- - Required for I(type=TLSA) when I(state=present).
+ - Required for O(type=TLSA) when O(state=present).
choices: [ 0, 1 ]
type: int
service:
description:
- Record service.
- - Required for I(type=SRV).
+ - Required for O(type=SRV).
type: str
solo:
description:
- Whether the record should be the only one for that record type and record name.
- - Only use with I(state=present).
+ - Only use with O(state=present).
- This will delete all other records with the same record name and type.
type: bool
state:
@@ -136,20 +147,20 @@ options:
default: 1
type:
description:
- - The type of DNS record to create. Required if I(state=present).
- - I(type=DS), I(type=SSHFP) and I(type=TLSA) added in Ansible 2.7.
+ - The type of DNS record to create. Required if O(state=present).
+ - Note that V(SPF) is no longer supported by CloudFlare. Support for it will be removed from community.general 9.0.0.
type: str
- choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ]
+ choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, CAA, TXT ]
value:
description:
- The record value.
- - Required for I(state=present).
+ - Required for O(state=present).
type: str
aliases: [ content ]
weight:
description:
- Service weight.
- - Required for I(type=SRV).
+ - Required for O(type=SRV).
type: int
default: 1
zone:
@@ -262,6 +273,15 @@ EXAMPLES = r'''
hash_type: 1
value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
+- name: Create a CAA record subdomain.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: subdomain
+ type: CAA
+ flag: 0
+ tag: issue
+ value: ca.example.com
+
- name: Create a DS record for subdomain.example.com
community.general.cloudflare_dns:
zone: example.com
@@ -291,7 +311,7 @@ record:
sample: "2016-03-25T19:09:42.516553Z"
data:
description: Additional record data.
- returned: success, if type is SRV, DS, SSHFP or TLSA
+  returned: success, if type is SRV, DS, SSHFP, TLSA or CAA
type: dict
sample: {
name: "jabber",
@@ -391,6 +411,8 @@ class CloudflareAPI(object):
self.algorithm = module.params['algorithm']
self.cert_usage = module.params['cert_usage']
self.hash_type = module.params['hash_type']
+ self.flag = module.params['flag']
+ self.tag = module.params['tag']
self.key_tag = module.params['key_tag']
self.port = module.params['port']
self.priority = module.params['priority']
@@ -595,7 +617,7 @@ class CloudflareAPI(object):
def delete_dns_records(self, **kwargs):
params = {}
for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
- 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']:
if param in kwargs:
params[param] = kwargs[param]
else:
@@ -613,7 +635,7 @@ class CloudflareAPI(object):
content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
elif params['type'] == 'SSHFP':
if not (params['value'] is None or params['value'] == ''):
- content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ content = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value'].upper()
elif params['type'] == 'TLSA':
if not (params['value'] is None or params['value'] == ''):
content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
@@ -640,7 +662,7 @@ class CloudflareAPI(object):
def ensure_dns_record(self, **kwargs):
params = {}
for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
- 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']:
if param in kwargs:
params[param] = kwargs[param]
else:
@@ -726,7 +748,7 @@ class CloudflareAPI(object):
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
sshfp_data = {
- "fingerprint": params['value'],
+ "fingerprint": params['value'].upper(),
"type": params['hash_type'],
"algorithm": params['algorithm'],
}
@@ -736,7 +758,7 @@ class CloudflareAPI(object):
'data': sshfp_data,
"ttl": params['ttl'],
}
- search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_value = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value']
if params['type'] == 'TLSA':
for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
@@ -757,12 +779,36 @@ class CloudflareAPI(object):
}
search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ if params['type'] == 'CAA':
+ for attr in [params['flag'], params['tag'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide flag, tag and a value to create this record type")
+ caa_data = {
+ "flags": params['flag'],
+ "tag": params['tag'],
+ "value": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': caa_data,
+ "ttl": params['ttl'],
+ }
+ search_value = None
+
zone_id = self._get_zone_id(params['zone'])
records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
# in theory this should be impossible as cloudflare does not allow
# the creation of duplicate records but lets cover it anyways
if len(records) > 1:
- self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # As Cloudflare API cannot filter record containing quotes
+ # CAA records must be compared locally
+ if params['type'] == 'CAA':
+ for rr in records:
+ if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']:
+ return rr, self.changed
+ else:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
# record already exists, check if it must be updated
if len(records) == 1:
cur_record = records[0]
@@ -811,6 +857,8 @@ def main():
hash_type=dict(type='int', choices=[1, 2]),
key_tag=dict(type='int', no_log=False),
port=dict(type='int'),
+ flag=dict(type='int', choices=[0, 1]),
+ tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']),
priority=dict(type='int', default=1),
proto=dict(type='str'),
proxied=dict(type='bool', default=False),
@@ -821,7 +869,7 @@ def main():
state=dict(type='str', default='present', choices=['absent', 'present']),
timeout=dict(type='int', default=30),
ttl=dict(type='int', default=1),
- type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT']),
value=dict(type='str', aliases=['content']),
weight=dict(type='int', default=1),
zone=dict(type='str', required=True, aliases=['domain']),
@@ -832,6 +880,7 @@ def main():
('state', 'absent', ['record']),
('type', 'SRV', ['proto', 'service']),
('type', 'TLSA', ['proto', 'port']),
+ ('type', 'CAA', ['flag', 'tag']),
],
)
@@ -858,6 +907,13 @@ def main():
and (module.params['value'] is None or module.params['value'] == ''))):
module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
+ if module.params['type'] == 'CAA':
+ if not ((module.params['flag'] is not None and module.params['tag'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['flag'] is None and module.params['tag'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined, or not at all.")
+
if module.params['type'] == 'DS':
if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
and not (module.params['value'] is None or module.params['value'] == ''))
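For reference, the CAA branch above sends a structured C(data) object instead of a plain C(content) string to the Cloudflare API; a minimal sketch of the payload it builds (record name and values are made up):

    # Assumed inputs; mirrors the caa_data/new_record construction above.
    params = {
        "type": "CAA",
        "record": "subdomain.example.com",
        "flag": 0,
        "tag": "issue",
        "value": "ca.example.com",
        "ttl": 1,
    }

    caa_data = {
        "flags": params["flag"],
        "tag": params["tag"],
        "value": params["value"],
    }
    new_record = {
        "type": params["type"],
        "name": params["record"],
        "data": caa_data,
        "ttl": params["ttl"],
    }
    print(new_record)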
diff --git a/ansible_collections/community/general/plugins/modules/cobbler_sync.py b/ansible_collections/community/general/plugins/modules/cobbler_sync.py
index d7acf4be6..4ec87c96c 100644
--- a/ansible_collections/community/general/plugins/modules/cobbler_sync.py
+++ b/ansible_collections/community/general/plugins/modules/cobbler_sync.py
@@ -30,7 +30,7 @@ options:
port:
description:
- Port number to be used for REST connection.
- - The default value depends on parameter C(use_ssl).
+ - The default value depends on parameter O(use_ssl).
type: int
username:
description:
@@ -43,13 +43,13 @@ options:
type: str
use_ssl:
description:
- - If C(false), an HTTP connection will be used instead of the default HTTPS connection.
+ - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- - If C(false), SSL certificates will not be validated.
- - This should only set to C(false) when used on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates will not be validated.
+      - This should only be set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
author:
diff --git a/ansible_collections/community/general/plugins/modules/cobbler_system.py b/ansible_collections/community/general/plugins/modules/cobbler_system.py
index c30b4f1c1..cecc02f71 100644
--- a/ansible_collections/community/general/plugins/modules/cobbler_system.py
+++ b/ansible_collections/community/general/plugins/modules/cobbler_system.py
@@ -30,7 +30,7 @@ options:
port:
description:
- Port number to be used for REST connection.
- - The default value depends on parameter C(use_ssl).
+ - The default value depends on parameter O(use_ssl).
type: int
username:
description:
@@ -43,13 +43,13 @@ options:
type: str
use_ssl:
description:
- - If C(false), an HTTP connection will be used instead of the default HTTPS connection.
+ - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- - If C(false), SSL certificates will not be validated.
- - This should only set to C(false) when used on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates will not be validated.
+      - This should only be set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
name:
@@ -144,11 +144,11 @@ EXAMPLES = r'''
RETURN = r'''
systems:
description: List of systems
- returned: I(state=query) and I(name) is not provided
+ returned: O(state=query) and O(name) is not provided
type: list
system:
description: (Resulting) information about the system we are working with
- returned: when I(name) is provided
+ returned: when O(name) is provided
type: dict
'''
diff --git a/ansible_collections/community/general/plugins/modules/composer.py b/ansible_collections/community/general/plugins/modules/composer.py
index 793abcda1..3d1c4a346 100644
--- a/ansible_collections/community/general/plugins/modules/composer.py
+++ b/ansible_collections/community/general/plugins/modules/composer.py
@@ -49,7 +49,7 @@ options:
description:
- Directory of your project (see --working-dir). This is required when
the command is not run globally.
- - Will be ignored if I(global_command=true).
+ - Will be ignored if O(global_command=true).
global_command:
description:
- Runs the specified command globally.
@@ -107,11 +107,11 @@ options:
composer_executable:
type: path
description:
- - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed.
+ - Path to composer executable on the remote host, if composer is not in E(PATH) or a custom composer is needed.
version_added: 3.2.0
requirements:
- php
- - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable)
+ - composer installed in bin path (recommended /usr/local/bin) or specified in O(composer_executable)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
- We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
@@ -170,10 +170,15 @@ def get_available_options(module, command='install'):
return command_help_json['definition']['options']
-def composer_command(module, command, arguments="", options=None, global_command=False):
+def composer_command(module, command, arguments="", options=None):
if options is None:
options = []
+ global_command = module.params['global_command']
+
+ if not global_command:
+ options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
if module.params['executable'] is None:
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
else:
@@ -217,7 +222,6 @@ def main():
module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
arguments = module.params['arguments']
- global_command = module.params['global_command']
available_options = get_available_options(module=module, command=command)
options = []
@@ -234,9 +238,6 @@ def main():
option = "--%s" % option
options.append(option)
- if not global_command:
- options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
-
option_params = {
'prefer_source': 'prefer-source',
'prefer_dist': 'prefer-dist',
@@ -260,7 +261,7 @@ def main():
else:
module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
- rc, out, err = composer_command(module, command, arguments, options, global_command)
+ rc, out, err = composer_command(module, command, arguments, options)
if rc != 0:
output = parse_out(err)
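The refactor above moves the C(--working-dir) handling from C(main()) into C(composer_command()), keyed off C(module.params); a reduced sketch of that decision (parameter values are illustrative):

    def composer_options(params, options=None):
        """Mimic how composer_command() now derives --working-dir from params."""
        options = list(options or [])
        if not params["global_command"]:
            options.extend(["--working-dir", "'%s'" % params["working_dir"]])
        return options

    print(composer_options({"global_command": False, "working_dir": "/var/www/app"}))
    # ['--working-dir', "'/var/www/app'"]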
diff --git a/ansible_collections/community/general/plugins/modules/consul.py b/ansible_collections/community/general/plugins/modules/consul.py
index cc599be36..fe1a89883 100644
--- a/ansible_collections/community/general/plugins/modules/consul.py
+++ b/ansible_collections/community/general/plugins/modules/consul.py
@@ -21,8 +21,8 @@ description:
notify the health of the entire node to the cluster.
Service level checks do not require a check name or id as these are derived
by Consul from the Service name and id respectively by appending 'service:'
- Node level checks require a I(check_name) and optionally a I(check_id)."
- - Currently, there is no complete way to retrieve the script, interval or ttl
+ Node level checks require a O(check_name) and optionally a O(check_id)."
+ - Currently, there is no complete way to retrieve the script, interval or TTL
metadata for a registered check. Without this metadata it is not possible to
tell if the data supplied with ansible represents a change to a check. As a
result this does not attempt to determine changes and will always report a
@@ -56,7 +56,7 @@ options:
service_id:
type: str
description:
- - The ID for the service, must be unique per node. If I(state=absent),
+ - The ID for the service, must be unique per node. If O(state=absent),
defaults to the service name if supplied.
host:
type: str
@@ -86,12 +86,12 @@ options:
type: int
description:
- The port on which the service is listening. Can optionally be supplied for
- registration of a service, i.e. if I(service_name) or I(service_id) is set.
+ registration of a service, that is if O(service_name) or O(service_id) is set.
service_address:
type: str
description:
- The address to advertise that the service will be listening on.
- This value will be passed as the I(address) parameter to Consul's
+ This value will be passed as the C(address) parameter to Consul's
C(/v1/agent/service/register) API method, so refer to the Consul API
documentation for further details.
tags:
@@ -103,55 +103,69 @@ options:
type: str
description:
- The script/command that will be run periodically to check the health of the service.
- - Requires I(interval) to be provided.
+ - Requires O(interval) to be provided.
+ - Mutually exclusive with O(ttl), O(tcp) and O(http).
interval:
type: str
description:
- The interval at which the service check will be run.
-        This is a number with a C(s) or C(m) suffix to signify the units of seconds or minutes e.g C(15s) or C(1m).
-        If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
- - Required if one of the parameters I(script), I(http), or I(tcp) is specified.
+ This is a number with a V(s) or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m).
+ If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
+ - Required if one of the parameters O(script), O(http), or O(tcp) is specified.
check_id:
type: str
description:
- - An ID for the service check. If I(state=absent), defaults to
- I(check_name). Ignored if part of a service definition.
+ - An ID for the service check. If O(state=absent), defaults to
+ O(check_name). Ignored if part of a service definition.
check_name:
type: str
description:
- Name for the service check. Required if standalone, ignored if
part of service definition.
+ check_node:
+ description:
+ - Node name.
+ # TODO: properly document!
+ type: str
+ check_host:
+ description:
+ - Host name.
+ # TODO: properly document!
+ type: str
ttl:
type: str
description:
- - Checks can be registered with a ttl instead of a I(script) and I(interval)
+ - Checks can be registered with a TTL instead of a O(script) and O(interval)
this means that the service will check in with the agent before the
- ttl expires. If it doesn't the check will be considered failed.
+ TTL expires. If it doesn't the check will be considered failed.
Required if registering a check and the script an interval are missing
- Similar to the interval this is a number with a C(s) or C(m) suffix to
- signify the units of seconds or minutes e.g C(15s) or C(1m).
- If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+ Similar to the interval this is a number with a V(s) or V(m) suffix to
+ signify the units of seconds or minutes, for example V(15s) or V(1m).
+ If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
+ - Mutually exclusive with O(script), O(tcp) and O(http).
tcp:
type: str
description:
- Checks can be registered with a TCP port. This means that consul
will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
- The format is C(host:port), for example C(localhost:80).
- - Requires I(interval) to be provided.
+ The format is V(host:port), for example V(localhost:80).
+ - Requires O(interval) to be provided.
+ - Mutually exclusive with O(script), O(ttl) and O(http).
version_added: '1.3.0'
http:
type: str
description:
- Checks can be registered with an HTTP endpoint. This means that consul
will check that the http endpoint returns a successful HTTP status.
- - Requires I(interval) to be provided.
+ - Requires O(interval) to be provided.
+ - Mutually exclusive with O(script), O(ttl) and O(tcp).
timeout:
type: str
description:
- A custom HTTP check timeout. The consul default is 10 seconds.
- Similar to the interval this is a number with a C(s) or C(m) suffix to
- signify the units of seconds or minutes, e.g. C(15s) or C(1m).
- If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+ Similar to the interval this is a number with a V(s) or V(m) suffix to
+ signify the units of seconds or minutes, for example V(15s) or V(1m).
+ If no suffix is supplied V(s) will be used by default, for example V(10) will be V(10s).
token:
type: str
description:
@@ -159,7 +173,7 @@ options:
ack_params_state_absent:
type: bool
description:
- - Disable deprecation warning when using parameters incompatible with I(state=absent).
+      - This parameter no longer has any effect and is deprecated. It will be removed in community.general 10.0.0.
'''
EXAMPLES = '''
@@ -377,13 +391,7 @@ def get_service_by_id_or_name(consul_api, service_id_or_name):
def parse_check(module):
- _checks = [module.params[p] for p in ('script', 'ttl', 'tcp', 'http') if module.params[p]]
-
- if len(_checks) > 1:
- module.fail_json(
- msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense')
-
- if module.params['check_id'] or _checks:
+ if module.params['check_id'] or any(module.params[p] is not None for p in ('script', 'ttl', 'tcp', 'http')):
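+        # A check object is only built when check_id or one of script/ttl/tcp/http is set;
+        # mutual exclusivity of those four is now enforced by the argument spec in main().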
return ConsulCheck(
module.params['check_id'],
module.params['check_name'],
@@ -501,15 +509,9 @@ class ConsulCheck(object):
self.check = consul.Check.ttl(self.ttl)
if http:
- if interval is None:
- raise Exception('http check must specify interval')
-
self.check = consul.Check.http(http, self.interval, self.timeout)
if tcp:
- if interval is None:
- raise Exception('tcp check must specify interval')
-
regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
match = re.match(regex, tcp)
@@ -596,30 +598,33 @@ def main():
timeout=dict(type='str'),
tags=dict(type='list', elements='str'),
token=dict(no_log=True),
- ack_params_state_absent=dict(type='bool'),
+ ack_params_state_absent=dict(
+ type='bool',
+ removed_in_version='10.0.0',
+ removed_from_collection='community.general',
+ ),
),
+ mutually_exclusive=[
+ ('script', 'ttl', 'tcp', 'http'),
+ ],
required_if=[
('state', 'present', ['service_name']),
('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True),
],
+ required_by={
+ 'script': 'interval',
+ 'http': 'interval',
+ 'tcp': 'interval',
+ },
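+    # Taken together: at most one of script/ttl/tcp/http may be given, and any of
+    # script/http/tcp now also requires interval, replacing the checks previously
+    # done in parse_check() and ConsulCheck.__init__().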
supports_check_mode=False,
)
p = module.params
test_dependencies(module)
- if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']) and not p['ack_params_state_absent']:
- module.deprecate(
- "The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is deprecated. "
- "In community.general 8.0.0 their use will become an error. "
- "To suppress this deprecation notice, set parameter ack_params_state_absent=true.",
- version="8.0.0",
- collection_name="community.general",
+ if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']):
+ module.fail_json(
+ msg="The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is no longer allowed."
)
- # When reaching c.g 8.0.0:
- # - Replace the deprecation with a fail_json(), remove the "ack_params_state_absent" condition from the "if"
- # - Add mutually_exclusive for ('script', 'ttl', 'tcp', 'http'), then remove that validation from parse_check()
- # - Add required_by {'script': 'interval', 'http': 'interval', 'tcp': 'interval'}, then remove checks for 'interval' in ConsulCheck.__init__()
- # - Deprecate the parameter ack_params_state_absent
try:
register_with_consul(module)
diff --git a/ansible_collections/community/general/plugins/modules/consul_acl.py b/ansible_collections/community/general/plugins/modules/consul_acl.py
index 91f955228..4617090fd 100644
--- a/ansible_collections/community/general/plugins/modules/consul_acl.py
+++ b/ansible_collections/community/general/plugins/modules/consul_acl.py
@@ -26,6 +26,10 @@ attributes:
support: none
diff_mode:
support: none
+deprecated:
+ removed_in: 10.0.0
+ why: The legacy ACL system was removed from Consul.
+ alternative: Use M(community.general.consul_token) and/or M(community.general.consul_policy) instead.
options:
mgmt_token:
description:
@@ -156,7 +160,7 @@ token:
rules:
description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
- returned: I(status) == "present"
+ returned: when O(state=present)
type: dict
sample: {
"key": {
diff --git a/ansible_collections/community/general/plugins/modules/consul_acl_bootstrap.py b/ansible_collections/community/general/plugins/modules/consul_acl_bootstrap.py
new file mode 100644
index 000000000..bf1da110b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_acl_bootstrap.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2024, Florian Apolloner (@apollo13)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: consul_acl_bootstrap
+short_description: Bootstrap ACLs in Consul
+version_added: 8.3.0
+description:
+ - Allows bootstrapping of ACLs in a Consul cluster, see
+ U(https://developer.hashicorp.com/consul/api-docs/acl#bootstrap-acls) for details.
+author:
+ - Florian Apolloner (@apollo13)
+extends_documentation_fragment:
+ - community.general.consul
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+      - Whether the ACL system should be bootstrapped.
+ choices: ['present', 'bootstrapped']
+ default: present
+ type: str
+ bootstrap_secret:
+ description:
+ - The secret to be used as secret ID for the initial token.
+      - Needs to be a UUID.
+ type: str
+"""
+
+EXAMPLES = """
+- name: Bootstrap the ACL system
+ community.general.consul_acl_bootstrap:
+ bootstrap_secret: 22eaeed1-bdbd-4651-724e-42ae6c43e387
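+
+# A minimal sketch: omitting bootstrap_secret lets Consul generate the secret itself;
+# the generated SecretID can then be read from the registered result.
+- name: Bootstrap the ACL system with an auto-generated secret
+  community.general.consul_acl_bootstrap:
+  register: bootstrap_result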
+"""
+
+RETURN = """
+result:
+ description:
+ - The bootstrap result as returned by the consul HTTP API.
+ - "B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and
+ C(ID) will not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
+ If you pass O(bootstrap_secret), make sure your playbook/role does not depend
+ on this return value!"
+ returned: changed
+ type: dict
+ sample:
+ AccessorID: 834a5881-10a9-a45b-f63c-490e28743557
+ CreateIndex: 25
+ CreateTime: '2024-01-21T20:26:27.114612038+01:00'
+ Description: Bootstrap Token (Global Management)
+ Hash: X2AgaFhnQGRhSSF/h0m6qpX1wj/HJWbyXcxkEM/5GrY=
+ ID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+ Local: false
+ ModifyIndex: 25
+ Policies:
+ - ID: 00000000-0000-0000-0000-000000000001
+ Name: global-management
+ SecretID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+ AUTH_ARGUMENTS_SPEC,
+ RequestError,
+ _ConsulModule,
+)
+
+_ARGUMENT_SPEC = {
+ "state": dict(type="str", choices=["present", "bootstrapped"], default="present"),
+ "bootstrap_secret": dict(type="str", no_log=True),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+_ARGUMENT_SPEC.pop("token")
+
+
+def main():
+ module = AnsibleModule(_ARGUMENT_SPEC)
+ consul_module = _ConsulModule(module)
+
+ data = {}
+    if module.params["bootstrap_secret"] is not None:
+ data["BootstrapSecret"] = module.params["bootstrap_secret"]
+
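+    # Note: a second run against an already-bootstrapped cluster gets HTTP 403 with
+    # "ACL bootstrap no longer allowed", which is treated as changed=False rather than a failure.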
+ try:
+ response = consul_module.put("acl/bootstrap", data=data)
+ except RequestError as e:
+ if e.status == 403 and b"ACL bootstrap no longer allowed" in e.response_data:
+ return module.exit_json(changed=False)
+ raise
+ else:
+ return module.exit_json(changed=True, result=response)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_auth_method.py b/ansible_collections/community/general/plugins/modules/consul_auth_method.py
new file mode 100644
index 000000000..afe549f6e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_auth_method.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2024, Florian Apolloner (@apollo13)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: consul_auth_method
+short_description: Manipulate Consul auth methods
+version_added: 8.3.0
+description:
+ - Allows the addition, modification and deletion of auth methods in a consul
+ cluster via the agent. For more details on using and configuring ACLs,
+ see U(https://www.consul.io/docs/guides/acl.html).
+author:
+ - Florian Apolloner (@apollo13)
+extends_documentation_fragment:
+ - community.general.consul
+ - community.general.consul.actiongroup_consul
+ - community.general.consul.token
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+ details:
+ - In check mode the diff will miss operational attributes.
+options:
+ state:
+ description:
+      - Whether the auth method should be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ name:
+ description:
+ - Specifies a name for the ACL auth method.
+ - The name can contain alphanumeric characters, dashes C(-), and underscores C(_).
+ type: str
+ required: true
+ type:
+ description:
+ - The type of auth method being configured.
+ - This field is immutable.
+ - Required when the auth method is created.
+ type: str
+ choices: ['kubernetes', 'jwt', 'oidc', 'aws-iam']
+ description:
+ description:
+ - Free form human readable description of the auth method.
+ type: str
+ display_name:
+ description:
+ - An optional name to use instead of O(name) when displaying information about this auth method.
+ type: str
+ max_token_ttl:
+ description:
+ - This specifies the maximum life of any token created by this auth method.
+ - Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes, respectively).
+ type: str
+ token_locality:
+ description:
+ - Defines the kind of token that this auth method should produce.
+ type: str
+ choices: ['local', 'global']
+ config:
+ description:
+ - The raw configuration to use for the chosen auth method.
+ - Contents will vary depending upon the type chosen.
+ - Required when the auth method is created.
+ type: dict
+"""
+
+EXAMPLES = """
+- name: Create an auth method
+ community.general.consul_auth_method:
+ name: test
+ type: jwt
+ config:
+ jwt_validation_pubkeys:
+ - |
+ -----BEGIN PUBLIC KEY-----
+ MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo
+ 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u
+ +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh
+ kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ
+ 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg
+ cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc
+ mwIDAQAB
+ -----END PUBLIC KEY-----
+ token: "{{ consul_management_token }}"
+
+- name: Delete auth method
+ community.general.consul_auth_method:
+ name: test
+ state: absent
+ token: "{{ consul_management_token }}"
+"""
+
+RETURN = """
+auth_method:
+ description: The auth method as returned by the consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ Config:
+ JWTValidationPubkeys:
+ - |-
+ -----BEGIN PUBLIC KEY-----
+ MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo
+ 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u
+ +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh
+ kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ
+ 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg
+ cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc
+ mwIDAQAB
+ -----END PUBLIC KEY-----
+ CreateIndex: 416
+ ModifyIndex: 487
+ Name: test
+ Type: jwt
+operation:
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
+"""
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+ AUTH_ARGUMENTS_SPEC,
+ _ConsulModule,
+ camel_case_key,
+)
+
+
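+# Normalization example (assuming the corrected regex below): "90m", "5400s" and "1h30m"
+# all come back as "1h30m", so equivalent TTL spellings do not trigger a spurious update.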
+def normalize_ttl(ttl):
+    matches = re.findall(r"(\d+)(h|m|s)", ttl)
+ ttl = 0
+ for value, unit in matches:
+ value = int(value)
+ if unit == "m":
+ value *= 60
+ elif unit == "h":
+ value *= 60 * 60
+ ttl += value
+
+ new_ttl = ""
+ hours, remainder = divmod(ttl, 3600)
+ if hours:
+ new_ttl += "{0}h".format(hours)
+ minutes, seconds = divmod(remainder, 60)
+ if minutes:
+ new_ttl += "{0}m".format(minutes)
+ if seconds:
+ new_ttl += "{0}s".format(seconds)
+ return new_ttl
+
+
+class ConsulAuthMethodModule(_ConsulModule):
+ api_endpoint = "acl/auth-method"
+ result_key = "auth_method"
+ unique_identifier = "name"
+
+ def map_param(self, k, v, is_update):
+ if k == "config" and v:
+ v = {camel_case_key(k2): v2 for k2, v2 in v.items()}
+ return super(ConsulAuthMethodModule, self).map_param(k, v, is_update)
+
+ def needs_update(self, api_obj, module_obj):
+ if "MaxTokenTTL" in module_obj:
+ module_obj["MaxTokenTTL"] = normalize_ttl(module_obj["MaxTokenTTL"])
+ return super(ConsulAuthMethodModule, self).needs_update(api_obj, module_obj)
+
+
+_ARGUMENT_SPEC = {
+ "name": dict(type="str", required=True),
+ "type": dict(type="str", choices=["kubernetes", "jwt", "oidc", "aws-iam"]),
+ "description": dict(type="str"),
+ "display_name": dict(type="str"),
+ "max_token_ttl": dict(type="str", no_log=False),
+ "token_locality": dict(type="str", choices=["local", "global"]),
+ "config": dict(type="dict"),
+ "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+def main():
+ module = AnsibleModule(
+ _ARGUMENT_SPEC,
+ supports_check_mode=True,
+ )
+ consul_module = ConsulAuthMethodModule(module)
+ consul_module.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_binding_rule.py b/ansible_collections/community/general/plugins/modules/consul_binding_rule.py
new file mode 100644
index 000000000..88496f867
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_binding_rule.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2024, Florian Apolloner (@apollo13)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: consul_binding_rule
+short_description: Manipulate Consul binding rules
+version_added: 8.3.0
+description:
+ - Allows the addition, modification and deletion of binding rules in a consul
+ cluster via the agent. For more details on using and configuring binding rules,
+ see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules).
+author:
+ - Florian Apolloner (@apollo13)
+extends_documentation_fragment:
+ - community.general.consul
+ - community.general.consul.actiongroup_consul
+ - community.general.consul.token
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+ details:
+ - In check mode the diff will miss operational attributes.
+options:
+ state:
+ description:
+ - Whether the binding rule should be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ name:
+ description:
+ - Specifies a name for the binding rule.
+ - 'Note: This is used to identify the binding rule. But since the API does not support a name, it is prefixed to the description.'
+ type: str
+ required: true
+ description:
+ description:
+ - Free form human readable description of the binding rule.
+ type: str
+ auth_method:
+ description:
+ - The name of the auth method that this rule applies to.
+ type: str
+ required: true
+ selector:
+ description:
+ - Specifies the expression used to match this rule against valid identities returned from an auth method validation.
+ - If empty this binding rule matches all valid identities returned from the auth method.
+ type: str
+ bind_type:
+ description:
+ - Specifies the way the binding rule affects a token created at login.
+ type: str
+ choices: [service, node, role, templated-policy]
+ bind_name:
+ description:
+ - The name to bind to a token at login-time.
+ - What it binds to can be adjusted with different values of the O(bind_type) parameter.
+ type: str
+ bind_vars:
+ description:
+ - Specifies the templated policy variables when O(bind_type) is set to V(templated-policy).
+ type: dict
+"""
+
+EXAMPLES = """
+- name: Create a binding rule
+ community.general.consul_binding_rule:
+ name: my_name
+ description: example rule
+ auth_method: minikube
+ bind_type: service
+ bind_name: "{{ serviceaccount.name }}"
+ token: "{{ consul_management_token }}"
+
+- name: Remove a binding rule
+ community.general.consul_binding_rule:
+ name: my_name
+ auth_method: minikube
+ state: absent
+"""
+
+RETURN = """
+binding_rule:
+ description: The binding rule as returned by the consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ Description: "my_name: example rule"
+ AuthMethod: minikube
+ Selector: serviceaccount.namespace==default
+ BindType: service
+ BindName: "{{ serviceaccount.name }}"
+ CreateIndex: 30
+ ID: 59c8a237-e481-4239-9202-45f117950c5f
+ ModifyIndex: 33
+operation:
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+ AUTH_ARGUMENTS_SPEC,
+ RequestError,
+ _ConsulModule,
+)
+
+
+class ConsulBindingRuleModule(_ConsulModule):
+ api_endpoint = "acl/binding-rule"
+ result_key = "binding_rule"
+ unique_identifier = "id"
+
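+    # The binding-rule API has no name field, so the module stores the configured name as a
+    # "name: " prefix of Description (for example "my_name: example rule") and finds the
+    # rule again by matching that prefix.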
+ def read_object(self):
+ url = "acl/binding-rules?authmethod={0}".format(self.params["auth_method"])
+ try:
+ results = self.get(url)
+ for result in results:
+                if result.get("Description", "").startswith(
+ "{0}: ".format(self.params["name"])
+ ):
+ return result
+ except RequestError as e:
+ if e.status == 404:
+ return
+ elif e.status == 403 and b"ACL not found" in e.response_data:
+ return
+ raise
+
+ def module_to_obj(self, is_update):
+ obj = super(ConsulBindingRuleModule, self).module_to_obj(is_update)
+ del obj["Name"]
+ return obj
+
+ def prepare_object(self, existing, obj):
+ final = super(ConsulBindingRuleModule, self).prepare_object(existing, obj)
+ name = self.params["name"]
+ description = final.pop("Description", "").split(": ", 1)[-1]
+ final["Description"] = "{0}: {1}".format(name, description)
+ return final
+
+
+_ARGUMENT_SPEC = {
+ "name": dict(type="str", required=True),
+ "description": dict(type="str"),
+ "auth_method": dict(type="str", required=True),
+ "selector": dict(type="str"),
+ "bind_type": dict(
+ type="str", choices=["service", "node", "role", "templated-policy"]
+ ),
+ "bind_name": dict(type="str"),
+ "bind_vars": dict(type="dict"),
+ "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+def main():
+ module = AnsibleModule(
+ _ARGUMENT_SPEC,
+ supports_check_mode=True,
+ )
+ consul_module = ConsulBindingRuleModule(module)
+ consul_module.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_kv.py b/ansible_collections/community/general/plugins/modules/consul_kv.py
index a4457f244..84169fc6b 100644
--- a/ansible_collections/community/general/plugins/modules/consul_kv.py
+++ b/ansible_collections/community/general/plugins/modules/consul_kv.py
@@ -17,7 +17,7 @@ description:
- Allows the retrieval, addition, modification and deletion of key/value entries in a
consul cluster via the agent. The entire contents of the record, including
the indices, flags and session are returned as C(value).
- - If the C(key) represents a prefix then note that when a value is removed, the existing
+ - If the O(key) represents a prefix then note that when a value is removed, the existing
value if any is returned as part of the results.
- See http://www.consul.io/docs/agent/http.html#kv for more details.
requirements:
@@ -36,14 +36,14 @@ attributes:
options:
state:
description:
- - The action to take with the supplied key and value. If the state is C(present) and I(value) is set, the key
- contents will be set to the value supplied and C(changed) will be set to C(true) only if the value was
- different to the current contents. If the state is C(present) and I(value) is not set, the existing value
- associated to the key will be returned. The state C(absent) will remove the key/value pair,
- again C(changed) will be set to true only if the key actually existed
+ - The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key
+ contents will be set to the value supplied and C(changed) will be set to V(true) only if the value was
+ different to the current contents. If the state is V(present) and O(value) is not set, the existing value
+ associated to the key will be returned. The state V(absent) will remove the key/value pair,
+ again C(changed) will be set to V(true) only if the key actually existed
prior to the removal. An attempt can be made to obtain or free the
- lock associated with a key/value pair with the states C(acquire) or
- C(release) respectively. a valid session must be supplied to make the
+ lock associated with a key/value pair with the states V(acquire) or
+        V(release) respectively. A valid session must be supplied to make the
attempt changed will be true if the attempt is successful, false
otherwise.
type: str
@@ -56,17 +56,17 @@ options:
required: true
value:
description:
- - The value should be associated with the given key, required if C(state)
- is C(present).
+ - The value should be associated with the given key, required if O(state)
+ is V(present).
type: str
recurse:
description:
- If the key represents a prefix, each entry with the prefix can be
- retrieved by setting this to C(true).
+ retrieved by setting this to V(true).
type: bool
retrieve:
description:
- - If the I(state) is C(present) and I(value) is set, perform a
+ - If the O(state) is V(present) and O(value) is set, perform a
read after setting the value and return this value.
default: true
type: bool
@@ -82,9 +82,9 @@ options:
type: str
cas:
description:
- - Used when acquiring a lock with a session. If the C(cas) is C(0), then
+ - Used when acquiring a lock with a session. If the O(cas) is V(0), then
Consul will only put the key if it does not already exist. If the
- C(cas) value is non-zero, then the key is only set if the index matches
+ O(cas) value is non-zero, then the key is only set if the index matches
the ModifyIndex of that key.
type: str
flags:
diff --git a/ansible_collections/community/general/plugins/modules/consul_policy.py b/ansible_collections/community/general/plugins/modules/consul_policy.py
new file mode 100644
index 000000000..f020622a0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_policy.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, HÃ¥kon Lerring
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: consul_policy
+short_description: Manipulate Consul policies
+version_added: 7.2.0
+description:
+ - Allows the addition, modification and deletion of policies in a consul
+ cluster via the agent. For more details on using and configuring ACLs,
+ see U(https://www.consul.io/docs/guides/acl.html).
+author:
+ - HÃ¥kon Lerring (@Hakon)
+extends_documentation_fragment:
+ - community.general.consul
+ - community.general.consul.actiongroup_consul
+ - community.general.consul.token
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ version_added: 8.3.0
+ diff_mode:
+ support: partial
+ version_added: 8.3.0
+ details:
+ - In check mode the diff will miss operational attributes.
+options:
+ state:
+ description:
+ - Whether the policy should be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ valid_datacenters:
+ description:
+ - Valid datacenters for the policy. All if list is empty.
+ type: list
+ elements: str
+ name:
+ description:
+ - The name that should be associated with the policy, this is opaque
+ to Consul.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the policy.
+ type: str
+ rules:
+ type: str
+ description:
+ - Rule document that should be associated with the current policy.
+"""
+
+EXAMPLES = """
+- name: Create a policy with rules
+ community.general.consul_policy:
+ host: consul1.example.com
+ token: some_management_acl
+ name: foo-access
+ rules: |
+ key "foo" {
+ policy = "read"
+ }
+ key "private/foo" {
+ policy = "deny"
+ }
+
+- name: Update the rules associated to a policy
+ community.general.consul_policy:
+ host: consul1.example.com
+ token: some_management_acl
+ name: foo-access
+ rules: |
+ key "foo" {
+ policy = "read"
+ }
+ key "private/foo" {
+ policy = "deny"
+ }
+ event "bbq" {
+ policy = "write"
+ }
+
+- name: Remove a policy
+ community.general.consul_policy:
+ host: consul1.example.com
+ token: some_management_acl
+ name: foo-access
+ state: absent
+"""
+
+RETURN = """
+policy:
+ description: The policy as returned by the consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ CreateIndex: 632
+ Description: Testing
+ Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
+ Name: foo-access
+ Rules: |-
+ key "foo" {
+ policy = "read"
+ }
+ key "private/foo" {
+ policy = "deny"
+ }
+operation:
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+ AUTH_ARGUMENTS_SPEC,
+ OPERATION_READ,
+ _ConsulModule,
+)
+
+_ARGUMENT_SPEC = {
+ "name": dict(required=True),
+ "description": dict(required=False, type="str"),
+ "rules": dict(type="str"),
+ "valid_datacenters": dict(type="list", elements="str"),
+ "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+class ConsulPolicyModule(_ConsulModule):
+ api_endpoint = "acl/policy"
+ result_key = "policy"
+ unique_identifier = "id"
+
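+    # Reads look the policy up by name (for example acl/policy/name/foo-access, assuming the
+    # shared helper joins these segments with "/"); create, update and delete keep using the
+    # ID-based endpoint provided by _ConsulModule.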
+ def endpoint_url(self, operation, identifier=None):
+ if operation == OPERATION_READ:
+ return [self.api_endpoint, "name", self.params["name"]]
+ return super(ConsulPolicyModule, self).endpoint_url(operation, identifier)
+
+
+def main():
+ module = AnsibleModule(
+ _ARGUMENT_SPEC,
+ supports_check_mode=True,
+ )
+ consul_module = ConsulPolicyModule(module)
+ consul_module.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_role.py b/ansible_collections/community/general/plugins/modules/consul_role.py
new file mode 100644
index 000000000..0da71507a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_role.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, HÃ¥kon Lerring
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: consul_role
+short_description: Manipulate Consul roles
+version_added: 7.5.0
+description:
+ - Allows the addition, modification and deletion of roles in a consul
+ cluster via the agent. For more details on using and configuring ACLs,
+ see U(https://www.consul.io/docs/guides/acl.html).
+author:
+ - HÃ¥kon Lerring (@Hakon)
+extends_documentation_fragment:
+ - community.general.consul
+ - community.general.consul.token
+ - community.general.consul.actiongroup_consul
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+ details:
+ - In check mode the diff will miss operational attributes.
+ version_added: 8.3.0
+options:
+ name:
+ description:
+ - A name used to identify the role.
+ required: true
+ type: str
+ state:
+ description:
+      - Whether the role should be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ description:
+ description:
+ - Description of the role.
+ - If not specified, the assigned description will not be changed.
+ type: str
+ policies:
+ type: list
+ elements: dict
+ description:
+ - List of policies to attach to the role. Each policy is a dict.
+ - If the parameter is left blank, any policies currently assigned will not be changed.
+      - An empty array (V([])) will clear any policies previously set.
+ suboptions:
+ name:
+ description:
+ - The name of the policy to attach to this role; see M(community.general.consul_policy) for more info.
+ - Either this or O(policies[].id) must be specified.
+ type: str
+ id:
+ description:
+ - The ID of the policy to attach to this role; see M(community.general.consul_policy) for more info.
+ - Either this or O(policies[].name) must be specified.
+ type: str
+ templated_policies:
+ description:
+ - The list of templated policies that should be applied to the role.
+ type: list
+ elements: dict
+ version_added: 8.3.0
+ suboptions:
+ template_name:
+ description:
+ - The templated policy name.
+ type: str
+ required: true
+ template_variables:
+ description:
+ - The templated policy variables.
+ - Not all templated policies require variables.
+ type: dict
+ service_identities:
+ type: list
+ elements: dict
+ description:
+ - List of service identities to attach to the role.
+ - If not specified, any service identities currently assigned will not be changed.
+      - If the parameter is an empty array (V([])), any service identities assigned will be unassigned.
+ suboptions:
+ service_name:
+ description:
+          - The name of the service.
+ - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
+ - May only contain lowercase alphanumeric characters as well as - and _.
+ - This suboption has been renamed from O(service_identities[].name) to O(service_identities[].service_name)
+ in community.general 8.3.0. The old name can still be used.
+ type: str
+ required: true
+ aliases:
+ - name
+ datacenters:
+ description:
+          - The datacenters in which the policies will be effective.
+          - This will result in effective policy only being valid in these datacenters.
+          - If an empty array (V([])) is specified, the policies will be valid in all datacenters,
+            including those which do not yet exist but may in the future.
+ type: list
+ elements: str
+ node_identities:
+ type: list
+ elements: dict
+ description:
+ - List of node identities to attach to the role.
+ - If not specified, any node identities currently assigned will not be changed.
+ - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ suboptions:
+ node_name:
+ description:
+ - The name of the node.
+ - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
+ - May only contain lowercase alphanumeric characters as well as - and _.
+ - This suboption has been renamed from O(node_identities[].name) to O(node_identities[].node_name)
+ in community.general 8.3.0. The old name can still be used.
+ type: str
+ required: true
+ aliases:
+ - name
+ datacenter:
+ description:
+          - The node's datacenter.
+ - This will result in effective policy only being valid in this datacenter.
+ type: str
+ required: true
+"""
+
+EXAMPLES = """
+- name: Create a role with 2 policies
+ community.general.consul_role:
+ host: consul1.example.com
+ token: some_management_acl
+ name: foo-role
+ policies:
+ - id: 783beef3-783f-f41f-7422-7087dc272765
+ - name: "policy-1"
+
+- name: Create a role with service identity
+ community.general.consul_role:
+ host: consul1.example.com
+ token: some_management_acl
+ name: foo-role-2
+ service_identities:
+ - name: web
+ datacenters:
+ - dc1
+
+- name: Create a role with node identity
+ community.general.consul_role:
+ host: consul1.example.com
+ token: some_management_acl
+ name: foo-role-3
+ node_identities:
+ - name: node-1
+ datacenter: dc2
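+
+# A hedged sketch: attaching a builtin templated policy. The template name and its
+# variables depend on your Consul version; builtin/service with a name variable is
+# used here purely as an illustration.
+- name: Create a role with a templated policy
+  community.general.consul_role:
+    host: consul1.example.com
+    token: some_management_acl
+    name: foo-role-4
+    templated_policies:
+      - template_name: builtin/service
+        template_variables:
+          name: web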
+
+- name: Remove a role
+ community.general.consul_role:
+ host: consul1.example.com
+ token: some_management_acl
+ name: foo-role-3
+ state: absent
+"""
+
+RETURN = """
+role:
+ description: The role object.
+ returned: success
+ type: dict
+ sample:
+ {
+ "CreateIndex": 39,
+ "Description": "",
+ "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
+ "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
+ "ModifyIndex": 39,
+ "Name": "foo-role",
+ "Policies": [
+ {"ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", "Name": "foo-access"}
+ ]
+ }
+operation:
+ description: The operation performed on the role.
+ returned: changed
+ type: str
+ sample: update
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+ AUTH_ARGUMENTS_SPEC,
+ OPERATION_READ,
+ _ConsulModule,
+)
+
+
+class ConsulRoleModule(_ConsulModule):
+ api_endpoint = "acl/role"
+ result_key = "role"
+ unique_identifier = "id"
+
+ def endpoint_url(self, operation, identifier=None):
+ if operation == OPERATION_READ:
+ return [self.api_endpoint, "name", self.params["name"]]
+ return super(ConsulRoleModule, self).endpoint_url(operation, identifier)
+
+
+NAME_ID_SPEC = dict(
+ name=dict(type="str"),
+ id=dict(type="str"),
+)
+
+NODE_ID_SPEC = dict(
+ node_name=dict(type="str", required=True, aliases=["name"]),
+ datacenter=dict(type="str", required=True),
+)
+
+SERVICE_ID_SPEC = dict(
+ service_name=dict(type="str", required=True, aliases=["name"]),
+ datacenters=dict(type="list", elements="str"),
+)
+
+TEMPLATE_POLICY_SPEC = dict(
+ template_name=dict(type="str", required=True),
+ template_variables=dict(type="dict"),
+)
+
+_ARGUMENT_SPEC = {
+ "name": dict(type="str", required=True),
+ "description": dict(type="str"),
+ "policies": dict(
+ type="list",
+ elements="dict",
+ options=NAME_ID_SPEC,
+ mutually_exclusive=[("name", "id")],
+ required_one_of=[("name", "id")],
+ ),
+ "templated_policies": dict(
+ type="list",
+ elements="dict",
+ options=TEMPLATE_POLICY_SPEC,
+ ),
+ "node_identities": dict(
+ type="list",
+ elements="dict",
+ options=NODE_ID_SPEC,
+ ),
+ "service_identities": dict(
+ type="list",
+ elements="dict",
+ options=SERVICE_ID_SPEC,
+ ),
+ "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+def main():
+ module = AnsibleModule(
+ _ARGUMENT_SPEC,
+ supports_check_mode=True,
+ )
+ consul_module = ConsulRoleModule(module)
+ consul_module.execute()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_session.py b/ansible_collections/community/general/plugins/modules/consul_session.py
index 246d13846..bd03b561a 100644
--- a/ansible_collections/community/general/plugins/modules/consul_session.py
+++ b/ansible_collections/community/general/plugins/modules/consul_session.py
@@ -16,12 +16,13 @@ description:
cluster. These sessions can then be used in conjunction with key value pairs
to implement distributed locks. In depth documentation for working with
sessions can be found at http://www.consul.io/docs/internals/sessions.html
-requirements:
- - python-consul
- - requests
author:
- Steve Gargan (@sgargan)
+ - HÃ¥kon Lerring (@Hakon)
extends_documentation_fragment:
+ - community.general.consul
+ - community.general.consul.actiongroup_consul
+ - community.general.consul.token
- community.general.attributes
attributes:
check_mode:
@@ -31,25 +32,25 @@ attributes:
options:
id:
description:
- - ID of the session, required when I(state) is either C(info) or
- C(remove).
+ - ID of the session, required when O(state) is either V(info) or
+ V(remove).
type: str
state:
description:
- Whether the session should be present i.e. created if it doesn't
- exist, or absent, removed if present. If created, the I(id) for the
- session is returned in the output. If C(absent), I(id) is
+ exist, or absent, removed if present. If created, the O(id) for the
+ session is returned in the output. If V(absent), O(id) is
required to remove the session. Info for a single session, all the
sessions for a node or all available sessions can be retrieved by
- specifying C(info), C(node) or C(list) for the I(state); for C(node)
- or C(info), the node I(name) or session I(id) is required as parameter.
+ specifying V(info), V(node) or V(list) for the O(state); for V(node)
+ or V(info), the node O(name) or session O(id) is required as parameter.
choices: [ absent, info, list, node, present ]
type: str
default: present
name:
description:
- The name that should be associated with the session. Required when
- I(state=node) is used.
+ O(state=node) is used.
type: str
delay:
description:
@@ -76,26 +77,6 @@ options:
the associated lock delay has expired.
type: list
elements: str
- host:
- description:
- - The host of the consul agent defaults to localhost.
- type: str
- default: localhost
- port:
- description:
- - The port on which the consul agent is running.
- type: int
- default: 8500
- scheme:
- description:
- - The protocol scheme on which the consul agent is running.
- type: str
- default: http
- validate_certs:
- description:
- - Whether to verify the TLS certificate of the consul agent.
- type: bool
- default: true
behavior:
description:
- The optional behavior that can be attached to the session when it
@@ -109,10 +90,6 @@ options:
type: int
version_added: 5.4.0
token:
- description:
- - The token key identifying an ACL rule set that controls access to
- the key value pair.
- type: str
version_added: 5.6.0
'''
@@ -147,37 +124,50 @@ EXAMPLES = '''
ttl: 600 # sec
'''
-try:
- import consul
- from requests.exceptions import ConnectionError
- python_consul_installed = True
-except ImportError:
- python_consul_installed = False
-
from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+ AUTH_ARGUMENTS_SPEC, _ConsulModule
+)
-def execute(module):
+def execute(module, consul_module):
state = module.params.get('state')
if state in ['info', 'list', 'node']:
- lookup_sessions(module)
+ lookup_sessions(module, consul_module)
elif state == 'present':
- update_session(module)
+ update_session(module, consul_module)
else:
- remove_session(module)
+ remove_session(module, consul_module)
+
+
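+# These helpers replace the old python-consul client calls with raw agent HTTP endpoints
+# (GET session/list, GET session/node/<node>, GET session/info/<id>), each scoped to the
+# requested datacenter via the dc query parameter.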
+def list_sessions(consul_module, datacenter):
+ return consul_module.get(
+ 'session/list',
+ params={'dc': datacenter})
+
+
+def list_sessions_for_node(consul_module, node, datacenter):
+ return consul_module.get(
+ ('session', 'node', node),
+ params={'dc': datacenter})
-def lookup_sessions(module):
+def get_session_info(consul_module, session_id, datacenter):
+ return consul_module.get(
+ ('session', 'info', session_id),
+ params={'dc': datacenter})
+
+
+def lookup_sessions(module, consul_module):
datacenter = module.params.get('datacenter')
state = module.params.get('state')
- consul_client = get_consul_api(module)
try:
if state == 'list':
- sessions_list = consul_client.session.list(dc=datacenter)
+ sessions_list = list_sessions(consul_module, datacenter)
# Ditch the index, this can be grabbed from the results
if sessions_list and len(sessions_list) >= 2:
sessions_list = sessions_list[1]
@@ -185,14 +175,14 @@ def lookup_sessions(module):
sessions=sessions_list)
elif state == 'node':
node = module.params.get('node')
- sessions = consul_client.session.node(node, dc=datacenter)
+ sessions = list_sessions_for_node(consul_module, node, datacenter)
module.exit_json(changed=True,
node=node,
sessions=sessions)
elif state == 'info':
session_id = module.params.get('id')
- session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ session_by_id = get_session_info(consul_module, session_id, datacenter)
module.exit_json(changed=True,
session_id=session_id,
sessions=session_by_id)
@@ -201,7 +191,26 @@ def lookup_sessions(module):
module.fail_json(msg="Could not retrieve session info %s" % e)
-def update_session(module):
+def create_session(consul_module, name, behavior, ttl, node,
+ lock_delay, datacenter, checks):
+ create_data = {
+ "LockDelay": lock_delay,
+ "Node": node,
+ "Name": name,
+ "Checks": checks,
+ "Behavior": behavior,
+ }
+ if ttl is not None:
+ create_data["TTL"] = "%ss" % str(ttl) # TTL is in seconds
+ create_session_response_dict = consul_module.put(
+ 'session/create',
+ params={
+ 'dc': datacenter},
+ data=create_data)
+ return create_session_response_dict["ID"]
+
+
+def update_session(module, consul_module):
name = module.params.get('name')
delay = module.params.get('delay')
@@ -211,18 +220,16 @@ def update_session(module):
behavior = module.params.get('behavior')
ttl = module.params.get('ttl')
- consul_client = get_consul_api(module)
-
try:
- session = consul_client.session.create(
- name=name,
- behavior=behavior,
- ttl=ttl,
- node=node,
- lock_delay=delay,
- dc=datacenter,
- checks=checks
- )
+ session = create_session(consul_module,
+ name=name,
+ behavior=behavior,
+ ttl=ttl,
+ node=node,
+ lock_delay=delay,
+ datacenter=datacenter,
+ checks=checks
+ )
module.exit_json(changed=True,
session_id=session,
name=name,
@@ -235,13 +242,15 @@ def update_session(module):
module.fail_json(msg="Could not create/update session %s" % e)
-def remove_session(module):
- session_id = module.params.get('id')
+def destroy_session(consul_module, session_id):
+ return consul_module.put(('session', 'destroy', session_id))
- consul_client = get_consul_api(module)
+
+def remove_session(module, consul_module):
+ session_id = module.params.get('id')
try:
- consul_client.session.destroy(session_id)
+ destroy_session(consul_module, session_id)
module.exit_json(changed=True,
session_id=session_id)
@@ -250,36 +259,31 @@ def remove_session(module):
session_id, e))
-def get_consul_api(module):
- return consul.Consul(host=module.params.get('host'),
- port=module.params.get('port'),
- scheme=module.params.get('scheme'),
- verify=module.params.get('validate_certs'),
- token=module.params.get('token'))
-
-
-def test_dependencies(module):
- if not python_consul_installed:
- module.fail_json(msg="python-consul required for this module. "
- "see https://python-consul.readthedocs.io/en/latest/#installation")
-
-
def main():
argument_spec = dict(
checks=dict(type='list', elements='str'),
delay=dict(type='int', default='15'),
- behavior=dict(type='str', default='release', choices=['release', 'delete']),
+ behavior=dict(
+ type='str',
+ default='release',
+ choices=[
+ 'release',
+ 'delete']),
ttl=dict(type='int'),
- host=dict(type='str', default='localhost'),
- port=dict(type='int', default=8500),
- scheme=dict(type='str', default='http'),
- validate_certs=dict(type='bool', default=True),
id=dict(type='str'),
name=dict(type='str'),
node=dict(type='str'),
- state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+ state=dict(
+ type='str',
+ default='present',
+ choices=[
+ 'absent',
+ 'info',
+ 'list',
+ 'node',
+ 'present']),
datacenter=dict(type='str'),
- token=dict(type='str', no_log=True),
+ **AUTH_ARGUMENTS_SPEC
)
module = AnsibleModule(
@@ -291,14 +295,10 @@ def main():
],
supports_check_mode=False
)
-
- test_dependencies(module)
+ consul_module = _ConsulModule(module)
try:
- execute(module)
- except ConnectionError as e:
- module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
- module.params.get('host'), module.params.get('port'), e))
+ execute(module, consul_module)
except Exception as e:
module.fail_json(msg=str(e))
diff --git a/ansible_collections/community/general/plugins/modules/consul_token.py b/ansible_collections/community/general/plugins/modules/consul_token.py
new file mode 100644
index 000000000..eee419863
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_token.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2024, Florian Apolloner (@apollo13)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: consul_token
+short_description: Manipulate Consul tokens
+version_added: 8.3.0
+description:
+ - Allows the addition, modification and deletion of tokens in a consul
+ cluster via the agent. For more details on using and configuring ACLs,
+ see U(https://www.consul.io/docs/guides/acl.html).
+author:
+ - Florian Apolloner (@apollo13)
+extends_documentation_fragment:
+ - community.general.consul
+ - community.general.consul.token
+ - community.general.consul.actiongroup_consul
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+ details:
+ - In check mode the diff will miss operational attributes.
+options:
+ state:
+ description:
+ - Whether the token should be present or absent.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ accessor_id:
+ description:
+ - Specifies a UUID to use as the token's Accessor ID.
+ If not specified a UUID will be generated for this field.
+ type: str
+ secret_id:
+ description:
+ - Specifies a UUID to use as the token's Secret ID.
+ If not specified a UUID will be generated for this field.
+ type: str
+ description:
+ description:
+ - Free form human readable description of the token.
+ type: str
+ policies:
+ type: list
+ elements: dict
+ description:
+ - List of policies to attach to the token. Each policy is a dict.
+ - If the parameter is left blank, any policies currently assigned will not be changed.
+      - An empty array (V([])) will clear any policies previously set.
+ suboptions:
+ name:
+ description:
+ - The name of the policy to attach to this token; see M(community.general.consul_policy) for more info.
+ - Either this or O(policies[].id) must be specified.
+ type: str
+ id:
+ description:
+ - The ID of the policy to attach to this token; see M(community.general.consul_policy) for more info.
+ - Either this or O(policies[].name) must be specified.
+ type: str
+ roles:
+ type: list
+ elements: dict
+ description:
+ - List of roles to attach to the token. Each role is a dict.
+ - If the parameter is left blank, any roles currently assigned will not be changed.
+      - An empty array (V([])) will clear any roles previously set.
+ suboptions:
+ name:
+ description:
+ - The name of the role to attach to this token; see M(community.general.consul_role) for more info.
+ - Either this or O(roles[].id) must be specified.
+ type: str
+ id:
+ description:
+ - The ID of the role to attach to this token; see M(community.general.consul_role) for more info.
+ - Either this or O(roles[].name) must be specified.
+ type: str
+ templated_policies:
+ description:
+      - The list of templated policies that should be applied to the token.
+ type: list
+ elements: dict
+ suboptions:
+ template_name:
+ description:
+ - The templated policy name.
+ type: str
+ required: true
+ template_variables:
+ description:
+ - The templated policy variables.
+ - Not all templated policies require variables.
+ type: dict
+ service_identities:
+ type: list
+ elements: dict
+ description:
+ - List of service identities to attach to the token.
+ - If not specified, any service identities currently assigned will not be changed.
+      - If the parameter is an empty array (V([])), any service identities assigned will be unassigned.
+ suboptions:
+ service_name:
+ description:
+ - The name of the service.
+ - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
+ - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+ type: str
+ required: true
+ datacenters:
+ description:
+ - The datacenters the token will be effective.
+          - The datacenters in which the token will be effective.
+          - If an empty array (V([])) is specified, the token will be valid in all datacenters,
+            including those which do not yet exist but may in the future.
+ elements: str
+ node_identities:
+ type: list
+ elements: dict
+ description:
+ - List of node identities to attach to the token.
+ - If not specified, any node identities currently assigned will not be changed.
+ - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ suboptions:
+ node_name:
+ description:
+ - The name of the node.
+ - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character.
+ - May only contain lowercase alphanumeric characters as well as V(-) and V(_).
+ type: str
+ required: true
+ datacenter:
+ description:
+ - The nodes datacenter.
+ - This will result in effective token only being valid in this datacenter.
+ type: str
+ required: true
+ local:
+ description:
+ - If true, indicates that the token should not be replicated globally
+ and instead be local to the current datacenter.
+ type: bool
+ expiration_ttl:
+ description:
+ - This is a convenience field and if set will initialize the C(expiration_time).
+ Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes,
+        respectively). Ignored when the token is updated.
+ type: str
+"""
+
+EXAMPLES = """
+- name: Create / Update a token by accessor_id
+ community.general.consul_token:
+ state: present
+ accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21
+ token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8
+ roles:
+ - name: role1
+ - name: role2
+ service_identities:
+ - service_name: service1
+ datacenters: [dc1, dc2]
+ node_identities:
+ - node_name: node1
+ datacenter: dc1
+ expiration_ttl: 50m
+
+- name: Delete a token
+ community.general.consul_token:
+ state: absent
+ accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21
+ token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8
+"""
+
+RETURN = """
+token:
+ description: The token as returned by the consul HTTP API.
+ returned: always
+ type: dict
+ sample:
+ AccessorID: 07a7de84-c9c7-448a-99cc-beaf682efd21
+ CreateIndex: 632
+ CreateTime: "2024-01-14T21:53:01.402749174+01:00"
+ Description: Testing
+ Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A=
+ Local: false
+ ModifyIndex: 633
+ SecretID: bd380fba-da17-7cee-8576-8d6427c6c930
+ ServiceIdentities: [{"ServiceName": "test"}]
+operation:
+ description: The operation performed.
+ returned: changed
+ type: str
+ sample: update
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.consul import (
+ AUTH_ARGUMENTS_SPEC,
+ _ConsulModule,
+)
+
+
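+# Example (hypothetical values): if the API reports Policies as
+# [{"ID": "b1a00172-...", "Name": "foo-access"}] while the task only supplied the name,
+# this fills in the missing ID (and vice versa) so that needs_update() compares complete
+# entries instead of flagging a spurious change.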
+def normalize_link_obj(api_obj, module_obj, key):
+ api_objs = api_obj.get(key)
+ module_objs = module_obj.get(key)
+ if api_objs is None or module_objs is None:
+ return
+ name_to_id = {i["Name"]: i["ID"] for i in api_objs}
+ id_to_name = {i["ID"]: i["Name"] for i in api_objs}
+
+ for obj in module_objs:
+ identifier = obj.get("ID")
+        name = obj.get("Name")
+ if identifier and not name and identifier in id_to_name:
+ obj["Name"] = id_to_name[identifier]
+ if not identifier and name and name in name_to_id:
+ obj["ID"] = name_to_id[name]
+
+
+class ConsulTokenModule(_ConsulModule):
+ api_endpoint = "acl/token"
+ result_key = "token"
+ unique_identifier = "accessor_id"
+
+ create_only_fields = {"expiration_ttl"}
+
+ def read_object(self):
+ # if `accessor_id` is not supplied we can only create objects and are not idempotent
+ if not self.params.get(self.unique_identifier):
+ return None
+ return super(ConsulTokenModule, self).read_object()
+
+ def needs_update(self, api_obj, module_obj):
+ # SecretID is usually not supplied
+ if "SecretID" not in module_obj and "SecretID" in api_obj:
+ del api_obj["SecretID"]
+ normalize_link_obj(api_obj, module_obj, "Roles")
+ normalize_link_obj(api_obj, module_obj, "Policies")
+ # ExpirationTTL is only supported on create, not for update
+ # it writes to ExpirationTime, so we need to remove that as well
+ if "ExpirationTTL" in module_obj:
+ del module_obj["ExpirationTTL"]
+ return super(ConsulTokenModule, self).needs_update(api_obj, module_obj)
+
+
+NAME_ID_SPEC = dict(
+ name=dict(type="str"),
+ id=dict(type="str"),
+)
+
+NODE_ID_SPEC = dict(
+ node_name=dict(type="str", required=True),
+ datacenter=dict(type="str", required=True),
+)
+
+SERVICE_ID_SPEC = dict(
+ service_name=dict(type="str", required=True),
+ datacenters=dict(type="list", elements="str"),
+)
+
+TEMPLATE_POLICY_SPEC = dict(
+ template_name=dict(type="str", required=True),
+ template_variables=dict(type="dict"),
+)
+
+
+_ARGUMENT_SPEC = {
+ "description": dict(),
+ "accessor_id": dict(),
+ "secret_id": dict(no_log=True),
+ "roles": dict(
+ type="list",
+ elements="dict",
+ options=NAME_ID_SPEC,
+ mutually_exclusive=[("name", "id")],
+ required_one_of=[("name", "id")],
+ ),
+ "policies": dict(
+ type="list",
+ elements="dict",
+ options=NAME_ID_SPEC,
+ mutually_exclusive=[("name", "id")],
+ required_one_of=[("name", "id")],
+ ),
+ "templated_policies": dict(
+ type="list",
+ elements="dict",
+ options=TEMPLATE_POLICY_SPEC,
+ ),
+ "node_identities": dict(
+ type="list",
+ elements="dict",
+ options=NODE_ID_SPEC,
+ ),
+ "service_identities": dict(
+ type="list",
+ elements="dict",
+ options=SERVICE_ID_SPEC,
+ ),
+ "local": dict(type="bool"),
+ "expiration_ttl": dict(type="str"),
+ "state": dict(default="present", choices=["present", "absent"]),
+}
+_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC)
+
+
+def main():
+ module = AnsibleModule(
+ _ARGUMENT_SPEC,
+ required_if=[("state", "absent", ["accessor_id"])],
+ supports_check_mode=True,
+ )
+ consul_module = ConsulTokenModule(module)
+ consul_module.execute()
+
+
+if __name__ == "__main__":
+ main()
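
As a side note on the argument spec above: roles and policies may be referenced either by name or by ID (the two are mutually exclusive per entry), and templated policies take a template name plus optional variables. A minimal, hypothetical sketch along those lines (the policy ID, template name, and variables are placeholders, not values taken from this module):

- name: Create / Update a token with a policy by ID and a templated policy
  community.general.consul_token:
    state: present
    accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21
    token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8
    policies:
      - id: 783beef3-783f-f41f-7422-7087dc272765   # placeholder policy ID
    templated_policies:
      - template_name: builtin/service             # assumed template name, for illustration only
        template_variables:
          name: web
    local: true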
diff --git a/ansible_collections/community/general/plugins/modules/copr.py b/ansible_collections/community/general/plugins/modules/copr.py
index 965c2a935..157a6c160 100644
--- a/ansible_collections/community/general/plugins/modules/copr.py
+++ b/ansible_collections/community/general/plugins/modules/copr.py
@@ -42,14 +42,14 @@ options:
type: str
state:
description:
- - Whether to set this project as C(enabled), C(disabled) or C(absent).
+ - Whether to set this project as V(enabled), V(disabled), or V(absent).
default: enabled
type: str
choices: [absent, enabled, disabled]
chroot:
description:
- The name of the chroot that you want to enable/disable/remove in the project,
- for example C(epel-7-x86_64). Default chroot is determined by the operating system,
+ for example V(epel-7-x86_64). Default chroot is determined by the operating system,
version of the operating system, and architecture on which the module is run.
type: str
"""
@@ -97,11 +97,26 @@ except ImportError:
DNF_IMP_ERR = traceback.format_exc()
HAS_DNF_PACKAGES = False
+from ansible.module_utils.common import respawn
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils import distro # pylint: disable=import-error
-from ansible.module_utils.basic import AnsibleModule # pylint: disable=import-error
-from ansible.module_utils.urls import open_url # pylint: disable=import-error
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+
+def _respawn_dnf():
+ if respawn.has_respawned():
+ return
+ system_interpreters = (
+ "/usr/libexec/platform-python",
+ "/usr/bin/python3",
+ "/usr/bin/python2",
+ "/usr/bin/python",
+ )
+ interpreter = respawn.probe_interpreters_for_module(system_interpreters, "dnf")
+ if interpreter:
+ respawn.respawn_module(interpreter)
class CoprModule(object):
@@ -460,6 +475,7 @@ def run_module():
params = module.params
if not HAS_DNF_PACKAGES:
+ _respawn_dnf()
module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR)
CoprModule.ansible_module = module
diff --git a/ansible_collections/community/general/plugins/modules/cpanm.py b/ansible_collections/community/general/plugins/modules/cpanm.py
index 6260992df..20ac3e714 100644
--- a/ansible_collections/community/general/plugins/modules/cpanm.py
+++ b/ansible_collections/community/general/plugins/modules/cpanm.py
@@ -27,8 +27,8 @@ options:
name:
type: str
description:
- - The Perl library to install. Valid values change according to the I(mode), see notes for more details.
- - Note that for installing from a local path the parameter I(from_path) should be used.
+ - The Perl library to install. Valid values change according to the O(mode), see notes for more details.
+ - Note that for installing from a local path the parameter O(from_path) should be used.
aliases: [pkg]
from_path:
type: path
@@ -59,7 +59,7 @@ options:
default: false
version:
description:
- - Version specification for the perl module. When I(mode) is C(new), C(cpanm) version operators are accepted.
+ - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted.
type: str
executable:
description:
@@ -68,32 +68,24 @@ options:
mode:
description:
- Controls the module behavior. See notes below for more details.
+ - Default is V(compatibility) but that behavior is deprecated and will be changed to V(new) in community.general 9.0.0.
type: str
choices: [compatibility, new]
- default: compatibility
version_added: 3.0.0
name_check:
description:
- - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified).
+ - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when specified).
type: str
version_added: 3.0.0
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
- - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)."
- - "C(compatibility) mode:"
- - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode.
- - I(name) must be either a module name or a distribution file.
- - >
- If the perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens.
- Otherwise, it will be installed using the C(cpanm) executable.
- - I(name) cannot be an URL, or a git URL.
- - C(cpanm) version specifiers do not work in this mode.
- - "C(new) mode:"
- - "When using C(new) mode, the module will behave differently"
- - >
- The I(name) parameter may refer to a module name, a distribution file,
- a HTTP URL or a git repository URL as described in C(cpanminus) documentation.
- - C(cpanm) version specifiers are recognized.
+ - "This module now comes with a choice of execution O(mode): V(compatibility) or V(new)."
+ - "O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. This is the default mode.
+ O(name) must be either a module name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version)
+ when specified), then nothing happens. Otherwise, it will be installed using the C(cpanm) executable. O(name) cannot be an URL, or a git URL.
+ C(cpanm) version specifiers do not work in this mode."
+ - "O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module name, a distribution file,
+ a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized."
author:
- "Franck Cuny (@fcuny)"
- "Alexei Znamensky (@russoz)"
@@ -158,7 +150,7 @@ class CPANMinus(ModuleHelper):
mirror_only=dict(type='bool', default=False),
installdeps=dict(type='bool', default=False),
executable=dict(type='path'),
- mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'),
+ mode=dict(type='str', choices=['compatibility', 'new']),
name_check=dict(type='str')
),
required_one_of=[('name', 'from_path')],
@@ -176,6 +168,14 @@ class CPANMinus(ModuleHelper):
def __init_module__(self):
v = self.vars
+ if v.mode is None:
+ self.deprecate(
+ "The default value 'compatibility' for parameter 'mode' is being deprecated "
+ "and it will be replaced by 'new'",
+ version="9.0.0",
+ collection_name="community.general"
+ )
+ v.mode = "compatibility"
if v.mode == "compatibility":
if v.name_check:
self.do_raise("Parameter name_check can only be used with mode=new")
@@ -183,8 +183,9 @@ class CPANMinus(ModuleHelper):
if v.name and v.from_path:
self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")
- self.command = self.get_bin_path(v.executable if v.executable else self.command)
- self.vars.set("binary", self.command)
+ self.command = v.executable if v.executable else self.command
+ self.runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
+ self.vars.binary = self.runner.binary
def _is_package_installed(self, name, locallib, version):
def process(rc, out, err):
@@ -220,8 +221,6 @@ class CPANMinus(ModuleHelper):
self.do_raise(msg=err, cmd=self.vars.cmd_args)
return 'is up to date' not in err and 'is up to date' not in out
- runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
-
v = self.vars
pkg_param = 'from_path' if v.from_path else 'name'
@@ -235,7 +234,7 @@ class CPANMinus(ModuleHelper):
return
pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
- with runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
+ with self.runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
self.changed = ctx.run(pkg_spec=pkg_spec)
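
Given the reworked O(mode) notes above, a hedged usage sketch may help; the package name, version specifier, and git URL are placeholders, and V(new) is selected explicitly because the V(compatibility) default is deprecated:

- name: Install Dancer, at least version 1.0, using the new mode
  community.general.cpanm:
    name: Dancer
    version: '~1.0'          # cpanm version specifiers are only honoured in mode=new
    mode: new

- name: Install a distribution straight from a git repository (placeholder URL)
  community.general.cpanm:
    name: git://github.com/example/Example-Module.git
    mode: new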
diff --git a/ansible_collections/community/general/plugins/modules/cronvar.py b/ansible_collections/community/general/plugins/modules/cronvar.py
index 7effed2ae..fdcbc7d24 100644
--- a/ansible_collections/community/general/plugins/modules/cronvar.py
+++ b/ansible_collections/community/general/plugins/modules/cronvar.py
@@ -40,16 +40,16 @@ options:
value:
description:
- The value to set this variable to.
- - Required if I(state=present).
+ - Required if O(state=present).
type: str
insertafter:
description:
- If specified, the variable will be inserted after the variable specified.
- - Used with I(state=present).
+ - Used with O(state=present).
type: str
insertbefore:
description:
- - Used with I(state=present). If specified, the variable will be inserted
+ - Used with O(state=present). If specified, the variable will be inserted
just before the variable specified.
type: str
state:
@@ -61,18 +61,19 @@ options:
user:
description:
- The specific user whose crontab should be modified.
- - This parameter defaults to C(root) when unset.
+ - This parameter defaults to V(root) when unset.
type: str
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- - Without a leading C(/), this is assumed to be in I(/etc/cron.d).
- - With a leading C(/), this is taken as absolute.
+ - Without a leading V(/), this is assumed to be in C(/etc/cron.d).
+ - With a leading V(/), this is taken as absolute.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup) variable by this module.
+ # TODO: C() above should be RV(), but return values have not been documented!
type: bool
default: false
requirements:
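
A short sketch tying the cronvar options above together (variable name, value, and file name are illustrative):

- name: Ensure MAILTO is set in a dedicated file under /etc/cron.d, keeping a backup
  community.general.cronvar:
    name: MAILTO
    value: ops@example.com
    cron_file: ansible_mailto    # no leading /, so it is assumed to live in /etc/cron.d
    user: root
    backup: true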
diff --git a/ansible_collections/community/general/plugins/modules/crypttab.py b/ansible_collections/community/general/plugins/modules/crypttab.py
index 6aea362e7..931a0c930 100644
--- a/ansible_collections/community/general/plugins/modules/crypttab.py
+++ b/ansible_collections/community/general/plugins/modules/crypttab.py
@@ -25,38 +25,38 @@ options:
name:
description:
- Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
- optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
- will be stripped from I(name).
+ optionally prefixed with V(/dev/mapper/), as it appears in the filesystem. V(/dev/mapper/)
+ will be stripped from O(name).
type: str
required: true
state:
description:
- - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+ - Use V(present) to add a line to C(/etc/crypttab) or update its definition
if already present.
- - Use I(absent) to remove a line with matching I(name).
- - Use I(opts_present) to add options to those already present; options with
+ - Use V(absent) to remove a line with matching O(name).
+ - Use V(opts_present) to add options to those already present; options with
different values will be updated.
- - Use I(opts_absent) to remove options from the existing set.
+ - Use V(opts_absent) to remove options from the existing set.
type: str
required: true
choices: [ absent, opts_absent, opts_present, present ]
backing_device:
description:
- Path to the underlying block device or file, or the UUID of a block-device
- prefixed with I(UUID=).
+ prefixed with V(UUID=).
type: str
password:
description:
- Encryption password, the path to a file containing the password, or
- C(-) or unset if the password should be entered at boot.
+ V(-) or unset if the password should be entered at boot.
type: path
opts:
description:
- - A comma-delimited list of options. See C(crypttab(5) ) for details.
+ - A comma-delimited list of options. See V(crypttab(5\)) for details.
type: str
path:
description:
- - Path to file to use instead of C(/etc/crypttab).
+ - Path to file to use instead of V(/etc/crypttab).
- This might be useful in a chroot environment.
type: path
default: /etc/crypttab
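
To make the crypttab states above concrete, a brief hedged sketch (mapping name, UUID, and keyfile path are placeholders):

- name: Ensure the luks-home mapping is present and backed by a keyfile
  community.general.crypttab:
    name: luks-home            # a /dev/mapper/ prefix would be stripped automatically
    state: present
    backing_device: UUID=123e4567-e89b-12d3-a456-426614174000
    password: /etc/keys/home.key

- name: Add the discard option to the existing option set
  community.general.crypttab:
    name: luks-home
    state: opts_present
    opts: discard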
diff --git a/ansible_collections/community/general/plugins/modules/datadog_downtime.py b/ansible_collections/community/general/plugins/modules/datadog_downtime.py
index 6e506eb85..a3a6a660f 100644
--- a/ansible_collections/community/general/plugins/modules/datadog_downtime.py
+++ b/ansible_collections/community/general/plugins/modules/datadog_downtime.py
@@ -38,7 +38,7 @@ options:
api_host:
description:
- The URL to the Datadog API.
- - This value can also be set with the C(DATADOG_HOST) environment variable.
+ - This value can also be set with the E(DATADOG_HOST) environment variable.
required: false
default: https://api.datadoghq.com
type: str
@@ -57,7 +57,7 @@ options:
id:
description:
- The identifier of the downtime.
- - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the C(state).
+      - If empty, a new downtime is created; otherwise it is either updated or deleted depending on the O(state).
- To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup.
type: int
monitor_tags:
@@ -99,7 +99,7 @@ options:
- For example, to have a recurring event on the first day of each month,
select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1).
- Most common rrule options from the iCalendar Spec are supported.
- - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)).
+ - Attributes specifying the duration in C(RRULE) are not supported (for example C(DTSTART), C(DTEND), C(DURATION)).
type: str
"""
@@ -248,7 +248,8 @@ def build_downtime(module):
downtime.timezone = module.params["timezone"]
if module.params["rrule"]:
downtime.recurrence = DowntimeRecurrence(
- rrule=module.params["rrule"]
+ rrule=module.params["rrule"],
+ type="rrule",
)
return downtime
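
With the change above the recurrence type is now filled in automatically, so a playbook only passes the rule itself. A hedged sketch of the first-of-the-month case mentioned in the documentation (API keys and scope are placeholders):

- name: Schedule a monthly downtime on the first day of each month
  community.general.datadog_downtime:
    state: present
    api_key: my_api_key_placeholder
    app_key: my_app_key_placeholder
    scope: "env:staging"
    rrule: "FREQ=MONTHLY;BYMONTHDAY=1"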
diff --git a/ansible_collections/community/general/plugins/modules/datadog_event.py b/ansible_collections/community/general/plugins/modules/datadog_event.py
index b8161eca6..6008b565b 100644
--- a/ansible_collections/community/general/plugins/modules/datadog_event.py
+++ b/ansible_collections/community/general/plugins/modules/datadog_event.py
@@ -82,7 +82,7 @@ options:
description: ["An arbitrary string to use for aggregation."]
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/datadog_monitor.py b/ansible_collections/community/general/plugins/modules/datadog_monitor.py
index f58df358b..75ae8c233 100644
--- a/ansible_collections/community/general/plugins/modules/datadog_monitor.py
+++ b/ansible_collections/community/general/plugins/modules/datadog_monitor.py
@@ -16,7 +16,6 @@ short_description: Manages Datadog monitors
description:
- Manages monitors within Datadog.
- Options as described on https://docs.datadoghq.com/api/.
- - The type C(event-v2) was added in community.general 4.8.0.
author: Sebastian Kornehl (@skornehl)
requirements: [datadog]
extends_documentation_fragment:
@@ -34,8 +33,8 @@ options:
type: str
api_host:
description:
- - The URL to the Datadog API. Default value is C(https://api.datadoghq.com).
- - This value can also be set with the C(DATADOG_HOST) environment variable.
+ - The URL to the Datadog API. Default value is V(https://api.datadoghq.com).
+ - This value can also be set with the E(DATADOG_HOST) environment variable.
required: false
type: str
version_added: '0.2.0'
@@ -59,8 +58,9 @@ options:
type:
description:
- The type of the monitor.
- - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0.
- - The type C(composite) was added in community.general 3.4.0.
+ - The types V(query alert), V(trace-analytics alert) and V(rum alert) were added in community.general 2.1.0.
+ - The type V(composite) was added in community.general 3.4.0.
+ - The type V(event-v2 alert) was added in community.general 4.8.0.
choices:
- metric alert
- service check
@@ -117,7 +117,7 @@ options:
escalation_message:
description:
- A message to include with a re-notification. Supports the '@username' notification we allow elsewhere.
- - Not applicable if I(renotify_interval=None).
+ - Not applicable if O(renotify_interval=none).
type: str
notify_audit:
description:
@@ -130,7 +130,7 @@ options:
- A dictionary of thresholds by status.
- Only available for service checks and metric alerts.
- Because each of them can have multiple thresholds, we do not define them directly in the query.
- - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
+ - "If not specified, it defaults to: V({'ok': 1, 'critical': 1, 'warning': 1})."
locked:
description:
- Whether changes to this monitor should be restricted to the creator or admins.
@@ -167,6 +167,32 @@ options:
- Integer from 1 (high) to 5 (low) indicating alert severity.
type: int
version_added: 4.6.0
+ notification_preset_name:
+ description:
+ - Toggles the display of additional content sent in the monitor notification.
+ choices:
+ - show_all
+ - hide_query
+ - hide_handles
+ - hide_all
+ type: str
+ version_added: 7.1.0
+ renotify_occurrences:
+ description:
+ - The number of times re-notification messages should be sent on the current status at the provided re-notification interval.
+ type: int
+ version_added: 7.1.0
+ renotify_statuses:
+ description:
+ - The types of monitor statuses for which re-notification messages are sent.
+ choices:
+ - alert
+ - warn
+ - no data
+ type: list
+ elements: str
+ version_added: 7.1.0
+
'''
EXAMPLES = '''
@@ -175,6 +201,10 @@ EXAMPLES = '''
type: "metric alert"
name: "Test monitor"
state: "present"
+ renotify_interval: 30
+ renotify_occurrences: 1
+ renotify_statuses: ["warn"]
+ notification_preset_name: "show_all"
query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
@@ -254,6 +284,9 @@ def main():
id=dict(),
include_tags=dict(required=False, default=True, type='bool'),
priority=dict(type='int'),
+ notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']),
+ renotify_occurrences=dict(type='int'),
+ renotify_statuses=dict(type='list', elements='str', choices=['alert', 'warn', 'no data']),
)
)
@@ -368,6 +401,9 @@ def install_monitor(module):
"new_host_delay": module.params['new_host_delay'],
"evaluation_delay": module.params['evaluation_delay'],
"include_tags": module.params['include_tags'],
+ "notification_preset_name": module.params['notification_preset_name'],
+ "renotify_occurrences": module.params['renotify_occurrences'],
+ "renotify_statuses": module.params['renotify_statuses'],
}
if module.params['type'] == "service check":
diff --git a/ansible_collections/community/general/plugins/modules/dconf.py b/ansible_collections/community/general/plugins/modules/dconf.py
index 8c325486c..065cf1a6a 100644
--- a/ansible_collections/community/general/plugins/modules/dconf.py
+++ b/ansible_collections/community/general/plugins/modules/dconf.py
@@ -46,11 +46,11 @@ notes:
- Keep in mind that the C(dconf) CLI tool, which this module wraps around,
utilises an unusual syntax for the values (GVariant). For example, if you
wanted to provide a string value, the correct syntax would be
- I(value="'myvalue'") - with single quotes as part of the Ansible parameter
+ O(value="'myvalue'") - with single quotes as part of the Ansible parameter
value.
- When using loops in combination with a value like
- "[('xkb', 'us'), ('xkb', 'se')]", you need to be aware of possible
- type conversions. Applying a filter C({{ item.value | string }})
+ V("[('xkb', 'us'\), ('xkb', 'se'\)]"), you need to be aware of possible
+ type conversions. Applying a filter V({{ item.value | string }})
to the parameter variable can avoid potential conversion problems.
- The easiest way to figure out exact syntax/value you need to provide for a
key is by making the configuration change in application affected by the
@@ -76,7 +76,7 @@ options:
- Value to set for the specified dconf key. Value should be specified in
GVariant format. Due to complexity of this format, it is best to have a
look at existing values in the dconf database.
- - Required for I(state=present).
+ - Required for O(state=present).
- Although the type is specified as "raw", it should typically be
specified as a string. However, boolean values in particular are
handled properly even when specified as booleans rather than strings
@@ -400,7 +400,7 @@ class DconfPreference(object):
rc, out, err = dbus_wrapper.run_command(command)
if rc != 0:
- self.module.fail_json(msg='dconf failed while reseting the value with error: %s' % err,
+ self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err,
out=out,
err=err)
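
To ground the GVariant quoting notes above, a small illustrative sketch (the key paths and values are examples, not defaults):

- name: Set a string value, with the inner quotes GVariant requires
  community.general.dconf:
    key: /org/gnome/desktop/interface/gtk-theme
    value: "'Adwaita'"
    state: present

- name: Apply keyboard layouts from a loop, forcing string conversion first
  community.general.dconf:
    key: /org/gnome/desktop/input-sources/sources
    value: "{{ item.value | string }}"
    state: present
  loop:
    - { value: "[('xkb', 'us'), ('xkb', 'se')]" }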
diff --git a/ansible_collections/community/general/plugins/modules/deploy_helper.py b/ansible_collections/community/general/plugins/modules/deploy_helper.py
index f0246cae6..b47ed8254 100644
--- a/ansible_collections/community/general/plugins/modules/deploy_helper.py
+++ b/ansible_collections/community/general/plugins/modules/deploy_helper.py
@@ -20,8 +20,9 @@ description:
- The Deploy Helper manages some of the steps common in deploying software.
It creates a folder structure, manages a symlink for the current release
and cleans up old releases.
- - "Running it with the I(state=query) or I(state=present) will return the C(deploy_helper) fact.
- C(project_path), whatever you set in the I(path) parameter,
+ # TODO: convert below to RETURN documentation!
+ - "Running it with the O(state=query) or O(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the O(path) parameter,
C(current_path), the path to the symlink that points to the active release,
C(releases_path), the path to the folder to keep releases in,
C(shared_path), the path to the folder to keep shared resources in,
@@ -50,33 +51,33 @@ options:
type: str
description:
- The state of the project.
- C(query) will only gather facts,
- C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
- C(finalize) will remove the unfinished_filename file, create a symlink to the newly
- deployed release and optionally clean old releases,
- C(clean) will remove failed & old releases,
- C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with I(state=absent)).
+ - V(query) will only gather facts.
+ - V(present) will create the project C(root) folder, and in it the C(releases) and C(shared) folders.
+ - V(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases.
+ - V(clean) will remove failed & old releases.
+ - V(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
choices: [ present, finalize, absent, clean, query ]
default: present
release:
type: str
description:
- - The release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
- This parameter is optional during I(state=present), but needs to be set explicitly for I(state=finalize).
- You can use the generated fact I(release={{ deploy_helper.new_release }}).
+ - The release version that is being deployed. Defaults to a timestamp format C(%Y%m%d%H%M%S) (for example V(20141119223359)).
+ This parameter is optional during O(state=present), but needs to be set explicitly for O(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
type: str
description:
- - The name of the folder that will hold the releases. This can be relative to I(path) or absolute.
+ - The name of the folder that will hold the releases. This can be relative to O(path) or absolute.
Returned in the C(deploy_helper.releases_path) fact.
default: releases
shared_path:
type: path
description:
- - The name of the folder that will hold the shared resources. This can be relative to I(path) or absolute.
+ - The name of the folder that will hold the shared resources. This can be relative to O(path) or absolute.
If this is set to an empty string, no shared folder will be created.
Returned in the C(deploy_helper.shared_path) fact.
default: shared
@@ -84,38 +85,38 @@ options:
current_path:
type: path
description:
- - The name of the symlink that is created when the deploy is finalized. Used in I(finalize) and I(clean).
+ - The name of the symlink that is created when the deploy is finalized. Used in O(state=finalize) and O(state=clean).
Returned in the C(deploy_helper.current_path) fact.
default: current
unfinished_filename:
type: str
description:
- - The name of the file that indicates a deploy has not finished. All folders in the I(releases_path) that
- contain this file will be deleted on I(state=finalize) with I(clean=True), or I(state=clean). This file is
- automatically deleted from the I(new_release_path) during I(state=finalize).
+ - The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that
+ contain this file will be deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is
+ automatically deleted from the C(new_release_path) during O(state=finalize).
default: DEPLOY_UNFINISHED
clean:
description:
- - Whether to run the clean procedure in case of I(state=finalize).
+ - Whether to run the clean procedure in case of O(state=finalize).
type: bool
default: true
keep_releases:
type: int
description:
- - The number of old releases to keep when cleaning. Used in I(finalize) and I(clean). Any unfinished builds
+ - The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds
will be deleted first, so only correct releases will count. The current version will not count.
default: 5
notes:
- - Facts are only returned for I(state=query) and I(state=present). If you use both, you should pass any overridden
+ - Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden
parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- - When using I(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ - When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
new naming strategy without problems.
- - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
- unless you pass your own release name with I(release). Due to the nature of deploying software, this should not
+ - Because of the default behaviour of generating the C(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with O(release). Due to the nature of deploying software, this should not
be much of a problem.
extends_documentation_fragment:
- ansible.builtin.files
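
Because the deploy_helper documentation above leans heavily on the generated deploy_helper fact, a compact sketch of the present/finalize round trip may help (the project path is a placeholder):

- name: Initialize the folder structure and gather the deploy_helper fact
  community.general.deploy_helper:
    path: /var/www/myapp
    state: present

- name: Placeholder for the actual build or copy into the new release folder
  ansible.builtin.debug:
    msg: "release the code into {{ deploy_helper.new_release_path }} here"

- name: Finalize the release and clean up old ones
  community.general.deploy_helper:
    path: /var/www/myapp
    release: "{{ deploy_helper.new_release }}"
    state: finalize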
diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_network.py b/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
index 8c1469063..cfb7d61cd 100644
--- a/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
+++ b/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
@@ -84,7 +84,7 @@ EXAMPLES = '''
RETURN = '''
network:
description: Dictionary describing the network.
- returned: On success when I(state=present).
+ returned: On success when O(state=present).
type: complex
contains:
id:
diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py b/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
index 7d83ddc69..9d129f3de 100644
--- a/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
+++ b/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
@@ -51,20 +51,20 @@ options:
private_ipv4_prefix_size:
description:
- The size of the IPv4 address space, e.g 24.
- - Required, if C(private_ipv4_base_address) is specified.
+ - Required, if O(private_ipv4_base_address) is specified.
type: int
default: 0
state:
description:
- The desired state for the target VLAN.
- - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
+ - V(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
choices: [present, absent, readonly]
default: present
type: str
allow_expand:
description:
- Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
- - If C(False), the module will fail under these conditions.
+ - If V(false), the module will fail under these conditions.
- This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
type: bool
default: false
@@ -105,7 +105,7 @@ EXAMPLES = '''
RETURN = '''
vlan:
description: Dictionary describing the VLAN.
- returned: On success when I(state) is 'present'
+  returned: On success when O(state=present).
type: complex
contains:
id:
diff --git a/ansible_collections/community/general/plugins/modules/discord.py b/ansible_collections/community/general/plugins/modules/discord.py
index 8b5391d44..130649f07 100644
--- a/ansible_collections/community/general/plugins/modules/discord.py
+++ b/ansible_collections/community/general/plugins/modules/discord.py
@@ -43,7 +43,7 @@ options:
content:
description:
- Content of the message to the Discord channel.
- - At least one of I(content) and I(embeds) must be specified.
+ - At least one of O(content) and O(embeds) must be specified.
type: str
username:
description:
@@ -55,7 +55,7 @@ options:
type: str
tts:
description:
- - Set this to C(true) if this is a TTS (Text to Speech) message.
+ - Set this to V(true) if this is a TTS (Text to Speech) message.
type: bool
default: false
embeds:
@@ -63,7 +63,7 @@ options:
- Send messages as Embeds to the Discord channel.
- Embeds can have a colored border, embedded images, text fields and more.
- "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)"
- - At least one of I(content) and I(embeds) must be specified.
+ - At least one of O(content) and O(embeds) must be specified.
type: list
elements: dict
'''
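
As noted above, at least one of content and embeds must be provided; a minimal hedged sketch (the webhook ID and token are placeholders):

- name: Send a plain message plus a simple embed
  community.general.discord:
    webhook_id: "000000000000000000"
    webhook_token: "placeholder-webhook-token"
    content: "Deployment finished"
    embeds:
      - title: "Build status"
        description: "All tests passed."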
diff --git a/ansible_collections/community/general/plugins/modules/django_manage.py b/ansible_collections/community/general/plugins/modules/django_manage.py
index 537cf0fa7..114ec0353 100644
--- a/ansible_collections/community/general/plugins/modules/django_manage.py
+++ b/ansible_collections/community/general/plugins/modules/django_manage.py
@@ -16,7 +16,7 @@ module: django_manage
short_description: Manages a Django application
description:
- Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
- I(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
+ O(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -29,20 +29,20 @@ options:
description:
- The name of the Django management command to run. The commands listed below are built in this module and have some basic parameter validation.
- >
- C(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be
- removed in community.general 9.0.0. Use C(clearsessions) instead.
- - C(collectstatic) - Collects the static files into C(STATIC_ROOT).
- - C(createcachetable) - Creates the cache tables for use with the database cache backend.
- - C(flush) - Removes all data from the database.
- - C(loaddata) - Searches for and loads the contents of the named I(fixtures) into the database.
- - C(migrate) - Synchronizes the database state with models and migrations.
+ V(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be
+ removed in community.general 9.0.0. Use V(clearsessions) instead.
+ - V(collectstatic) - Collects the static files into C(STATIC_ROOT).
+ - V(createcachetable) - Creates the cache tables for use with the database cache backend.
+ - V(flush) - Removes all data from the database.
+ - V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database.
+ - V(migrate) - Synchronizes the database state with models and migrations.
- >
- C(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7).
- This parameter will be removed in community.general 9.0.0. Use C(migrate) instead.
- - C(test) - Runs tests for all installed apps.
+ V(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7).
+ This parameter will be removed in community.general 9.0.0. Use V(migrate) instead.
+ - V(test) - Runs tests for all installed apps.
- >
- C(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be
- removed in community.general 9.0.0. Use C(check) instead.
+ V(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be
+ removed in community.general 9.0.0. Use V(check) instead.
- Other commands can be entered, but will fail if they are unknown to Django. Other commands that may
prompt for user input should be run with the C(--noinput) flag.
type: str
@@ -55,14 +55,14 @@ options:
aliases: [app_path, chdir]
settings:
description:
- - The Python path to the application's settings module, such as C(myapp.settings).
+ - The Python path to the application's settings module, such as V(myapp.settings).
type: path
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located
external to the application directory.
- - This would be equivalent to adding I(pythonpath)'s value to the C(PYTHONPATH) environment variable.
+ - This would be equivalent to adding O(pythonpath)'s value to the E(PYTHONPATH) environment variable.
type: path
required: false
aliases: [python_path]
@@ -73,54 +73,54 @@ options:
aliases: [virtual_env]
apps:
description:
- - A list of space-delimited apps to target. Used by the C(test) command.
+ - A list of space-delimited apps to target. Used by the V(test) command.
type: str
required: false
cache_table:
description:
- - The name of the table used for database-backed caching. Used by the C(createcachetable) command.
+ - The name of the table used for database-backed caching. Used by the V(createcachetable) command.
type: str
required: false
clear:
description:
- Clear the existing files before trying to copy or link the original file.
- - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically.
+ - Used only with the V(collectstatic) command. The C(--noinput) argument will be added automatically.
required: false
default: false
type: bool
database:
description:
- - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb),
- and C(migrate) commands.
+ - The database to target. Used by the V(createcachetable), V(flush), V(loaddata), V(syncdb),
+ and V(migrate) commands.
type: str
required: false
failfast:
description:
- - Fail the command immediately if a test fails. Used by the C(test) command.
+ - Fail the command immediately if a test fails. Used by the V(test) command.
required: false
default: false
type: bool
aliases: [fail_fast]
fixtures:
description:
- - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command.
+ - A space-delimited list of fixture file names to load in the database. B(Required) by the V(loaddata) command.
type: str
required: false
skip:
description:
- - Will skip over out-of-order missing migrations, you can only use this parameter with C(migrate) command.
+      - Will skip over out-of-order missing migrations; you can only use this parameter with the V(migrate) command.
required: false
type: bool
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this
- parameter with C(migrate) command.
+ parameter with V(migrate) command.
required: false
type: bool
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with
- C(collectstatic) command.
+ V(collectstatic) command.
required: false
type: bool
testrunner:
@@ -133,9 +133,9 @@ options:
ack_venv_creation_deprecation:
description:
- >-
- When a I(virtualenv) is set but the virtual environment does not exist, the current behavior is
+ When a O(virtualenv) is set but the virtual environment does not exist, the current behavior is
to create a new virtual environment. That behavior is deprecated and if that case happens it will
- generate a deprecation warning. Set this flag to C(true) to suppress the deprecation warning.
+ generate a deprecation warning. Set this flag to V(true) to suppress the deprecation warning.
- Please note that you will receive no further warning about this being removed until the module
will start failing in such cases from community.general 9.0.0 on.
type: bool
@@ -146,19 +146,19 @@ notes:
B(ATTENTION - DEPRECATION): Support for Django releases older than 4.1 will be removed in
community.general version 9.0.0 (estimated to be released in May 2024).
Please notice that Django 4.1 requires Python 3.8 or greater.
- - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter
+ - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the O(virtualenv) parameter
is specified. This requirement is deprecated and will be removed in community.general version 9.0.0.
- - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already
+ - This module will create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not already
exist at the given location. This behavior is deprecated and will be removed in community.general version 9.0.0.
- - The parameter I(virtualenv) will remain in use, but it will require the specified virtualenv to exist.
+ - The parameter O(virtualenv) will remain in use, but it will require the specified virtualenv to exist.
The recommended way to create one in Ansible is by using M(ansible.builtin.pip).
- - This module assumes English error messages for the C(createcachetable) command to detect table existence,
+ - This module assumes English error messages for the V(createcachetable) command to detect table existence,
unfortunately.
- - To be able to use the C(migrate) command with django versions < 1.7, you must have C(south) installed and added
+ - To be able to use the V(migrate) command with django versions < 1.7, you must have C(south) installed and added
as an app in your settings.
- - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings.
- - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang,
- i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
+ - To be able to use the V(collectstatic) command, you must have enabled staticfiles in your settings.
+ - Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang,
+ for example C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
seealso:
- name: django-admin and manage.py Reference
description: Reference for C(django-admin) or C(manage.py) commands.
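
Pulling the django_manage options above together, a short hedged sketch (paths, the settings module, and fixture names are placeholders):

- name: Run database migrations inside an existing virtualenv
  community.general.django_manage:
    command: migrate
    project_path: /srv/myproject
    settings: myproject.settings.production
    virtualenv: /srv/venvs/myproject

- name: Load initial data with the loaddata command
  community.general.django_manage:
    command: loaddata
    project_path: /srv/myproject
    fixtures: initial_data.json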
diff --git a/ansible_collections/community/general/plugins/modules/dnf_config_manager.py b/ansible_collections/community/general/plugins/modules/dnf_config_manager.py
new file mode 100644
index 000000000..069fd0ddc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dnf_config_manager.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Andrew Hyatt <andy@hyatt.xyz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: dnf_config_manager
+short_description: Enable or disable dnf repositories using config-manager
+version_added: 8.2.0
+description:
+ - This module enables or disables repositories using the C(dnf config-manager) sub-command.
+author: Andrew Hyatt (@ahyattdev) <andy@hyatt.xyz>
+requirements:
+ - dnf
+ - dnf-plugins-core
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Repository ID, for example V(crb).
+ default: []
+ required: false
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether the repositories should be V(enabled) or V(disabled).
+ default: enabled
+ required: false
+ type: str
+ choices: [enabled, disabled]
+seealso:
+ - module: ansible.builtin.dnf
+ - module: ansible.builtin.yum_repository
+'''
+
+EXAMPLES = r'''
+- name: Ensure the crb repository is enabled
+ community.general.dnf_config_manager:
+ name: crb
+ state: enabled
+
+- name: Ensure the appstream and zfs repositories are disabled
+ community.general.dnf_config_manager:
+ name:
+ - appstream
+ - zfs
+ state: disabled
+'''
+
+RETURN = r'''
+repo_states_pre:
+  description: Repository states before action taken.
+ returned: success
+ type: dict
+ contains:
+ enabled:
+ description: Enabled repository IDs.
+ returned: success
+ type: list
+ elements: str
+ disabled:
+ description: Disabled repository IDs.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ enabled:
+ - appstream
+ - baseos
+ - crb
+ disabled:
+ - appstream-debuginfo
+ - appstream-source
+ - baseos-debuginfo
+ - baseos-source
+ - crb-debug
+ - crb-source
+repo_states_post:
+ description: Repository states after action taken.
+ returned: success
+ type: dict
+ contains:
+ enabled:
+ description: Enabled repository IDs.
+ returned: success
+ type: list
+ elements: str
+ disabled:
+ description: Disabled repository IDs.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ enabled:
+ - appstream
+ - baseos
+ - crb
+ disabled:
+ - appstream-debuginfo
+ - appstream-source
+ - baseos-debuginfo
+ - baseos-source
+ - crb-debug
+ - crb-source
+changed_repos:
+ description: Repositories changed.
+ returned: success
+ type: list
+ elements: str
+ sample: [ 'crb' ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+
+DNF_BIN = "/usr/bin/dnf"
+REPO_ID_RE = re.compile(r'^Repo-id\s*:\s*(\S+)$')
+REPO_STATUS_RE = re.compile(r'^Repo-status\s*:\s*(disabled|enabled)$')
+
+
+def get_repo_states(module):
+ rc, out, err = module.run_command([DNF_BIN, 'repolist', '--all', '--verbose'], check_rc=True)
+
+ repos = dict()
+ last_repo = ''
+    for line in out.split('\n'):
+ m = REPO_ID_RE.match(line)
+ if m:
+ if len(last_repo) > 0:
+ module.fail_json(msg='dnf repolist parse failure: parsed another repo id before next status')
+ last_repo = m.group(1)
+ continue
+ m = REPO_STATUS_RE.match(line)
+ if m:
+ if len(last_repo) == 0:
+ module.fail_json(msg='dnf repolist parse failure: parsed status before repo id')
+ repos[last_repo] = m.group(1)
+ last_repo = ''
+ return repos
+
+
+def set_repo_states(module, repo_ids, state):
+ module.run_command([DNF_BIN, 'config-manager', '--set-{0}'.format(state)] + repo_ids, check_rc=True)
+
+
+def pack_repo_states_for_return(states):
+ enabled = []
+ disabled = []
+ for repo_id in states:
+ if states[repo_id] == 'enabled':
+ enabled.append(repo_id)
+ else:
+ disabled.append(repo_id)
+
+ # Sort for consistent results
+ enabled.sort()
+ disabled.sort()
+
+ return {'enabled': enabled, 'disabled': disabled}
+
+
+def main():
+ module_args = dict(
+ name=dict(type='list', elements='str', required=False, default=[]),
+ state=dict(type='str', required=False, choices=['enabled', 'disabled'], default='enabled')
+ )
+
+ result = dict(
+ changed=False
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ if not os.path.exists(DNF_BIN):
+ module.fail_json(msg="%s was not found" % DNF_BIN)
+
+ repo_states = get_repo_states(module)
+ result['repo_states_pre'] = pack_repo_states_for_return(repo_states)
+
+ desired_repo_state = module.params['state']
+ names = module.params['name']
+
+ to_change = []
+ for repo_id in names:
+ if repo_id not in repo_states:
+ module.fail_json(msg="did not find repo with ID '{0}' in dnf repolist --all --verbose".format(repo_id))
+ if repo_states[repo_id] != desired_repo_state:
+ to_change.append(repo_id)
+ result['changed'] = len(to_change) > 0
+ result['changed_repos'] = to_change
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ if len(to_change) > 0:
+ set_repo_states(module, to_change, desired_repo_state)
+
+ repo_states_post = get_repo_states(module)
+ result['repo_states_post'] = pack_repo_states_for_return(repo_states_post)
+
+ for repo_id in to_change:
+ if repo_states_post[repo_id] != desired_repo_state:
+ module.fail_json(msg="dnf config-manager failed to make '{0}' {1}".format(repo_id, desired_repo_state))
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
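
Since the RETURN block above documents repo_states_pre, repo_states_post, and changed_repos but the examples do not use them, here is a small hedged sketch that registers the result (the repository ID is illustrative):

- name: Enable crb and report what actually changed
  community.general.dnf_config_manager:
    name: crb
    state: enabled
  register: dnf_cm

- name: Show which repositories were toggled
  ansible.builtin.debug:
    msg: "Changed: {{ dnf_cm.changed_repos }}, now enabled: {{ dnf_cm.repo_states_post.enabled }}"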
diff --git a/ansible_collections/community/general/plugins/modules/dnf_versionlock.py b/ansible_collections/community/general/plugins/modules/dnf_versionlock.py
index fac3ad78d..3fcf132ea 100644
--- a/ansible_collections/community/general/plugins/modules/dnf_versionlock.py
+++ b/ansible_collections/community/general/plugins/modules/dnf_versionlock.py
@@ -38,7 +38,7 @@ options:
description:
- Package name spec to add or exclude to or delete from the C(locklist)
using the format expected by the C(dnf repoquery) command.
- - This parameter is mutually exclusive with I(state=clean).
+ - This parameter is mutually exclusive with O(state=clean).
type: list
required: false
elements: str
@@ -52,19 +52,19 @@ options:
default: false
state:
description:
- - Whether to add (C(present) or C(excluded)) to or remove (C(absent) or
- C(clean)) from the C(locklist).
- - C(present) will add a package name spec to the C(locklist). If there is a
+ - Whether to add (V(present) or V(excluded)) to or remove (V(absent) or
+ V(clean)) from the C(locklist).
+      - V(present) will add a package name spec to the C(locklist). If there is an
installed package that matches, then only that version will be added.
Otherwise, all available package versions will be added.
- - C(excluded) will add a package name spec as excluded to the
+ - V(excluded) will add a package name spec as excluded to the
C(locklist). It means that packages represented by the package name
spec will be excluded from transaction operations. All available
package versions will be added.
- - C(absent) will delete entries in the C(locklist) that match the
+ - V(absent) will delete entries in the C(locklist) that match the
package name spec.
- - C(clean) will delete all entries in the C(locklist). This option is
- mutually exclusive with C(name).
+ - V(clean) will delete all entries in the C(locklist). This option is
+ mutually exclusive with O(name).
choices: [ 'absent', 'clean', 'excluded', 'present' ]
type: str
default: present
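
A brief hedged sketch of the four states documented above (package name specs are placeholders):

- name: Lock the installed bash version
  community.general.dnf_versionlock:
    name: bash
    state: present

- name: Exclude all nginx packages from transactions
  community.general.dnf_versionlock:
    name: nginx*
    state: excluded

- name: Clear the entire locklist
  community.general.dnf_versionlock:
    state: clean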
diff --git a/ansible_collections/community/general/plugins/modules/dnsimple.py b/ansible_collections/community/general/plugins/modules/dnsimple.py
index df41f73a6..c5829e36e 100644
--- a/ansible_collections/community/general/plugins/modules/dnsimple.py
+++ b/ansible_collections/community/general/plugins/modules/dnsimple.py
@@ -26,13 +26,13 @@ attributes:
options:
account_email:
description:
- - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
+ - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) will be looked for.
- "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
- "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0"
type: str
account_api_token:
description:
- - Account API token. See I(account_email) for more information.
+ - Account API token. See O(account_email) for more information.
type: str
domain:
description:
@@ -77,7 +77,7 @@ options:
solo:
description:
- Whether the record should be the only one for that record type and record name.
- - Only use with C(state) is set to C(present) on a record.
+      - Only use when O(state) is set to V(present) on a record.
type: 'bool'
default: false
sandbox:
@@ -178,7 +178,7 @@ class DNSimpleV2():
client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general")
else:
msg = "Option account_email or account_api_token not provided. " \
- "Dnsimple authentiction with a .dnsimple config file is not " \
+ "Dnsimple authentication with a .dnsimple config file is not " \
"supported with dnsimple-python>=2.0.0"
raise DNSimpleException(msg)
client.identity.whoami()
@@ -225,24 +225,24 @@ class DNSimpleV2():
self.client.domains.delete_domain(self.account.id, domain)
def get_records(self, zone, dnsimple_filter=None):
- """return dns ressource records which match a specified filter"""
+ """return dns resource records which match a specified filter"""
records_list = self._get_paginated_result(self.client.zones.list_records,
account_id=self.account.id,
zone=zone, filter=dnsimple_filter)
return [d.__dict__ for d in records_list]
def delete_record(self, domain, rid):
- """delete a single dns ressource record"""
+ """delete a single dns resource record"""
self.client.zones.delete_record(self.account.id, domain, rid)
def update_record(self, domain, rid, ttl=None, priority=None):
- """update a single dns ressource record"""
+ """update a single dns resource record"""
zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority)
result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__
return result
def create_record(self, domain, name, record_type, content, ttl=None, priority=None):
- """create a single dns ressource record"""
+ """create a single dns resource record"""
zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority)
return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__
diff --git a/ansible_collections/community/general/plugins/modules/dnsimple_info.py b/ansible_collections/community/general/plugins/modules/dnsimple_info.py
index 52fd53303..46c2877f7 100644
--- a/ansible_collections/community/general/plugins/modules/dnsimple_info.py
+++ b/ansible_collections/community/general/plugins/modules/dnsimple_info.py
@@ -83,7 +83,7 @@ dnsimple_domain_info:
description: Returns a list of dictionaries of all domains associated with the supplied account ID.
type: list
elements: dict
- returned: success when I(name) is not specified
+ returned: success when O(name) is not specified
sample:
- account_id: 1234
created_at: '2021-10-16T21:25:42Z'
@@ -120,7 +120,7 @@ dnsimple_records_info:
description: Returns a list of dictionaries with all records for the domain supplied.
type: list
elements: dict
- returned: success when I(name) is specified, but I(record) is not
+ returned: success when O(name) is specified, but O(record) is not
sample:
- content: ns1.dnsimple.com admin.dnsimple.com
created_at: '2021-10-16T19:07:34Z'
@@ -174,7 +174,7 @@ dnsimple_records_info:
type: str
dnsimple_record_info:
description: Returns a list of dictionaries that match the record supplied.
- returned: success when I(name) and I(record) are specified
+ returned: success when O(name) and O(record) are specified
type: list
elements: dict
sample:
@@ -239,9 +239,9 @@ with deps.declare("requests"):
def build_url(account, key, is_sandbox):
headers = {'Accept': 'application/json',
- 'Authorization': 'Bearer ' + key}
- url = 'https://api{sandbox}.dnsimple.com/'.format(
- sandbox=".sandbox" if is_sandbox else "") + 'v2/' + account
+ 'Authorization': 'Bearer {0}'.format(key)}
+ sandbox = '.sandbox' if is_sandbox else ''
+ url = 'https://api{sandbox}.dnsimple.com/v2/{account}'.format(sandbox=sandbox, account=account)
req = Request(url=url, headers=headers)
prepped_request = req.prepare()
return prepped_request
@@ -250,19 +250,21 @@ def build_url(account, key, is_sandbox):
def iterate_data(module, request_object):
base_url = request_object.url
response = Session().send(request_object)
- if 'pagination' in response.json():
- data = response.json()["data"]
- pages = response.json()["pagination"]["total_pages"]
- if int(pages) > 1:
- for page in range(1, pages):
- page = page + 1
- request_object.url = base_url + '&page=' + str(page)
- new_results = Session().send(request_object)
- data = data + new_results.json()["data"]
- return data
- else:
+ if 'pagination' not in response.json():
module.fail_json('API Call failed, check ID, key and sandbox values')
+ data = response.json()["data"]
+ total_pages = response.json()["pagination"]["total_pages"]
+ page = 1
+
+ while page < total_pages:
+ page = page + 1
+ request_object.url = '{url}&page={page}'.format(url=base_url, page=page)
+ new_results = Session().send(request_object)
+ data = data + new_results.json()['data']
+
+ return data
+
def record_info(dnsimple_mod, req_obj):
req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET'
diff --git a/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py b/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
index 44587ca39..47d9430e7 100644
--- a/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
+++ b/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
@@ -87,14 +87,14 @@ options:
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
monitor:
description:
- - If C(true), add or change the monitor. This is applicable only for A records.
+ - If V(true), add or change the monitor. This is applicable only for A records.
type: bool
default: false
@@ -133,7 +133,7 @@ options:
contactList:
description:
- Name or id of the contact list that the monitor will notify.
- - The default C('') means the Account Owner.
+ - The default V('') means the Account Owner.
type: str
httpFqdn:
@@ -153,7 +153,7 @@ options:
failover:
description:
- - If C(true), add or change the failover. This is applicable only for A records.
+ - If V(true), add or change the failover. This is applicable only for A records.
type: bool
default: false
@@ -509,15 +509,15 @@ class DME2(object):
return json.dumps(data, separators=(',', ':'))
def createRecord(self, data):
- # @TODO update the cache w/ resultant record + id when impleneted
+ # @TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url, 'POST', data)
def updateRecord(self, record_id, data):
- # @TODO update the cache w/ resultant record + id when impleneted
+ # @TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
def deleteRecord(self, record_id):
- # @TODO remove record from the cache when impleneted
+ # @TODO remove record from the cache when implemented
return self.query(self.record_url + '/' + str(record_id), 'DELETE')
def getMonitor(self, record_id):
diff --git a/ansible_collections/community/general/plugins/modules/dpkg_divert.py b/ansible_collections/community/general/plugins/modules/dpkg_divert.py
index 4a1651f51..5f0d924fe 100644
--- a/ansible_collections/community/general/plugins/modules/dpkg_divert.py
+++ b/ansible_collections/community/general/plugins/modules/dpkg_divert.py
@@ -20,13 +20,13 @@ description:
- A diversion is for C(dpkg) the knowledge that only a given package
(or the local administrator) is allowed to install a file at a given
location. Other packages shipping their own version of this file will
- be forced to I(divert) it, i.e. to install it at another location. It
+ be forced to O(divert) it, that is, to install it at another location. It
allows one to keep changes in a file provided by a debian package by
preventing its overwrite at package upgrade.
- This module manages diversions of debian packages files using the
C(dpkg-divert) commandline tool. It can either create or remove a
diversion for a given file, but also update an existing diversion
- to modify its I(holder) and/or its I(divert) location.
+ to modify its O(holder) and/or its O(divert) location.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -39,14 +39,14 @@ options:
description:
- The original and absolute path of the file to be diverted or
undiverted. This path is unique, i.e. it is not possible to get
- two diversions for the same I(path).
+ two diversions for the same O(path).
required: true
type: path
state:
description:
- - When I(state=absent), remove the diversion of the specified
- I(path); when I(state=present), create the diversion if it does
- not exist, or update its package I(holder) or I(divert) location,
+ - When O(state=absent), remove the diversion of the specified
+ O(path); when O(state=present), create the diversion if it does
+ not exist, or update its package O(holder) or O(divert) location,
if it already exists.
type: str
default: present
@@ -59,31 +59,31 @@ options:
- The actual package does not have to be installed or even to exist
for its name to be valid. If not specified, the diversion is held
by 'LOCAL', that is reserved by/for dpkg for local diversions.
- - This parameter is ignored when I(state=absent).
+ - This parameter is ignored when O(state=absent).
type: str
divert:
description:
- The location where the versions of file will be diverted.
- Default is to add suffix C(.distrib) to the file path.
- - This parameter is ignored when I(state=absent).
+ - This parameter is ignored when O(state=absent).
type: path
rename:
description:
- - Actually move the file aside (when I(state=present)) or back (when
- I(state=absent)), but only when changing the state of the diversion.
+ - Actually move the file aside (when O(state=present)) or back (when
+ O(state=absent)), but only when changing the state of the diversion.
This parameter has no effect when attempting to add a diversion that
already exists or when removing a nonexistent one.
- - Unless I(force=true), renaming fails if the destination file already
+ - Unless O(force=true), renaming fails if the destination file already
exists (this lock being a dpkg-divert feature, and bypassing it being
a module feature).
type: bool
default: false
force:
description:
- - When I(rename=true) and I(force=true), renaming is performed even if
+ - When O(rename=true) and O(force=true), renaming is performed even if
the target of the renaming exists, i.e. the existing contents of the
file at this location will be lost.
- - This parameter is ignored when I(rename=false).
+ - This parameter is ignored when O(rename=false).
type: bool
default: false
requirements:
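
Editor's note: as a rough illustration of how the options documented above map onto the dpkg-divert command line (a simplified sketch, not the module's actual implementation):

    # Simplified sketch of the dpkg-divert invocation implied by the options above;
    # the module itself adds more handling (check mode, force, diff output, ...).
    def build_dpkg_divert_cmd(path, state='present', holder=None, divert=None, rename=False):
        cmd = ['dpkg-divert']
        if holder:
            cmd += ['--package', holder]   # diversion held by this package
        else:
            cmd += ['--local']             # held by 'LOCAL'
        if divert:
            cmd += ['--divert', divert]    # default is path + '.distrib'
        if rename:
            cmd.append('--rename')         # move the file aside (or back on removal)
        cmd.append('--add' if state == 'present' else '--remove')
        cmd.append(path)
        return cmd
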
diff --git a/ansible_collections/community/general/plugins/modules/easy_install.py b/ansible_collections/community/general/plugins/modules/easy_install.py
index 564493180..2e8fc2f4f 100644
--- a/ansible_collections/community/general/plugins/modules/easy_install.py
+++ b/ansible_collections/community/general/plugins/modules/easy_install.py
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
module: easy_install
short_description: Installs Python libraries
description:
- - Installs Python libraries, optionally in a I(virtualenv)
+ - Installs Python libraries, optionally in a C(virtualenv).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -26,13 +26,13 @@ options:
name:
type: str
description:
- - A Python library name
+ - A Python library name.
required: true
virtualenv:
type: str
description:
- - an optional I(virtualenv) directory path to install into. If the
- I(virtualenv) does not exist, it is created automatically
+ - An optional O(virtualenv) directory path to install into. If the
+ O(virtualenv) does not exist, it is created automatically.
virtualenv_site_packages:
description:
- Whether the virtual environment will inherit packages from the
@@ -46,21 +46,21 @@ options:
type: str
description:
- The command to create the virtual environment with. For example
- C(pyvenv), C(virtualenv), C(virtualenv2).
+ V(pyvenv), V(virtualenv), V(virtualenv2).
default: virtualenv
executable:
type: str
description:
- The explicit executable or a pathname to the executable to be used to
run easy_install for a specific version of Python installed in the
- system. For example C(easy_install-3.3), if there are both Python 2.7
+ system. For example V(easy_install-3.3), if there are both Python 2.7
and 3.3 installations in the system and you want to run easy_install
for the Python 3.3 installation.
default: easy_install
state:
type: str
description:
- - The desired state of the library. C(latest) ensures that the latest version is installed.
+ - The desired state of the library. V(latest) ensures that the latest version is installed.
choices: [present, latest]
default: present
notes:
@@ -68,8 +68,8 @@ notes:
libraries. Thus this module is not able to remove libraries. It is
generally recommended to use the M(ansible.builtin.pip) module which you can first install
using M(community.general.easy_install).
- - Also note that I(virtualenv) must be installed on the remote host if the
- C(virtualenv) parameter is specified.
+ - Also note that C(virtualenv) must be installed on the remote host if the
+ O(virtualenv) parameter is specified.
requirements: [ "virtualenv" ]
author: "Matt Wright (@mattupstate)"
'''
diff --git a/ansible_collections/community/general/plugins/modules/ejabberd_user.py b/ansible_collections/community/general/plugins/modules/ejabberd_user.py
index 397207ae6..d0b575e1c 100644
--- a/ansible_collections/community/general/plugins/modules/ejabberd_user.py
+++ b/ansible_collections/community/general/plugins/modules/ejabberd_user.py
@@ -78,6 +78,7 @@ EXAMPLES = '''
import syslog
from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
class EjabberdUser(object):
@@ -85,7 +86,7 @@ class EjabberdUser(object):
object manages user creation and deletion using ejabberdctl. The following
commands are currently supported:
* ejabberdctl register
- * ejabberdctl deregister
+ * ejabberdctl unregister
"""
def __init__(self, module):
@@ -95,6 +96,17 @@ class EjabberdUser(object):
self.host = module.params.get('host')
self.user = module.params.get('username')
self.pwd = module.params.get('password')
+ self.runner = CmdRunner(
+ module,
+ command="ejabberdctl",
+ arg_formats=dict(
+ cmd=cmd_runner_fmt.as_list(),
+ host=cmd_runner_fmt.as_list(),
+ user=cmd_runner_fmt.as_list(),
+ pwd=cmd_runner_fmt.as_list(),
+ ),
+ check_rc=False,
+ )
@property
def changed(self):
@@ -102,7 +114,7 @@ class EjabberdUser(object):
changed. It will return True if the user does not match the supplied
credentials and False if it does
"""
- return self.run_command('check_password', [self.user, self.host, self.pwd])
+ return self.run_command('check_password', 'user host pwd', (lambda rc, out, err: bool(rc)))
@property
def exists(self):
@@ -110,7 +122,7 @@ class EjabberdUser(object):
host specified. If the user exists True is returned, otherwise False
is returned
"""
- return self.run_command('check_account', [self.user, self.host])
+ return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc)))
def log(self, entry):
""" This method will log information to the local syslog facility """
@@ -118,29 +130,36 @@ class EjabberdUser(object):
syslog.openlog('ansible-%s' % self.module._name)
syslog.syslog(syslog.LOG_NOTICE, entry)
- def run_command(self, cmd, options):
+ def run_command(self, cmd, options, process=None):
""" This method will run the any command specified and return the
returns using the Ansible common module
"""
- cmd = [self.module.get_bin_path('ejabberdctl'), cmd] + options
- self.log('command: %s' % " ".join(cmd))
- return self.module.run_command(cmd)
+ def _proc(*a):
+ return a
+
+ if process is None:
+ process = _proc
+
+ with self.runner("cmd " + options, output_process=process) as ctx:
+ res = ctx.run(cmd=cmd, host=self.host, user=self.user, pwd=self.pwd)
+ self.log('command: %s' % " ".join(ctx.run_info['cmd']))
+ return res
def update(self):
""" The update method will update the credentials for the user provided
"""
- return self.run_command('change_password', [self.user, self.host, self.pwd])
+ return self.run_command('change_password', 'user host pwd')
def create(self):
""" The create method will create a new user on the host with the
password provided
"""
- return self.run_command('register', [self.user, self.host, self.pwd])
+ return self.run_command('register', 'user host pwd')
def delete(self):
""" The delete method will delete the user from the host
"""
- return self.run_command('unregister', [self.user, self.host])
+ return self.run_command('unregister', 'user host')
def main():
@@ -150,7 +169,7 @@ def main():
username=dict(required=True, type='str'),
password=dict(type='str', no_log=True),
state=dict(default='present', choices=['present', 'absent']),
- logging=dict(default=False, type='bool') # deprecate in favour of c.g.syslogger?
+ logging=dict(default=False, type='bool', removed_in_version='10.0.0', removed_from_collection='community.general'),
),
required_if=[
('state', 'present', ['password']),
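
Editor's note: the CmdRunner pattern adopted above declares each argument's formatting once and then selects, per call, which arguments appear and in which order. A condensed sketch based on the code in this diff:

    # Condensed sketch of the CmdRunner usage introduced above; the runner renders
    # e.g. "ejabberdctl check_account <user> <host>" from the args-order string.
    from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt

    def build_runner(module):
        return CmdRunner(
            module,
            command="ejabberdctl",
            arg_formats=dict(
                cmd=cmd_runner_fmt.as_list(),
                host=cmd_runner_fmt.as_list(),
                user=cmd_runner_fmt.as_list(),
                pwd=cmd_runner_fmt.as_list(),
            ),
            check_rc=False,
        )

    def account_exists(runner, user, host):
        # ejabberdctl check_account returns rc 0 when the account exists.
        with runner("cmd user host") as ctx:
            rc, dummy_out, dummy_err = ctx.run(cmd="check_account", user=user, host=host)
        return rc == 0
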
diff --git a/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py b/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
index cd4bb45de..92b628a74 100644
--- a/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
+++ b/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
@@ -69,7 +69,6 @@ options:
plugin_bin:
description:
- Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
- - The default changed in Ansible 2.4 to None.
type: path
plugin_dir:
description:
diff --git a/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py b/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
index 487b6feef..b06cd01de 100644
--- a/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
+++ b/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
@@ -46,8 +46,8 @@ options:
state:
description:
- Indicates the desired lunid state.
- - C(present) ensures specified lunid is present in the Storage Group.
- - C(absent) ensures specified lunid is absent from Storage Group.
+ - V(present) ensures specified lunid is present in the Storage Group.
+ - V(absent) ensures specified lunid is absent from Storage Group.
default: present
choices: [ "present", "absent"]
type: str
diff --git a/ansible_collections/community/general/plugins/modules/etcd3.py b/ansible_collections/community/general/plugins/modules/etcd3.py
index 9cd027406..2fdc3f2f8 100644
--- a/ansible_collections/community/general/plugins/modules/etcd3.py
+++ b/ansible_collections/community/general/plugins/modules/etcd3.py
@@ -61,22 +61,22 @@ options:
type: str
description:
- The password to use for authentication.
- - Required if I(user) is defined.
+ - Required if O(user) is defined.
ca_cert:
type: path
description:
- The Certificate Authority to use to verify the etcd host.
- - Required if I(client_cert) and I(client_key) are defined.
+ - Required if O(client_cert) and O(client_key) are defined.
client_cert:
type: path
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- - Required if I(client_key) is defined.
+ - Required if O(client_key) is defined.
client_key:
type: path
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- - Required if I(client_cert) is defined.
+ - Required if O(client_cert) is defined.
timeout:
type: int
description:
diff --git a/ansible_collections/community/general/plugins/modules/facter.py b/ansible_collections/community/general/plugins/modules/facter.py
index e7cf52e20..87017246a 100644
--- a/ansible_collections/community/general/plugins/modules/facter.py
+++ b/ansible_collections/community/general/plugins/modules/facter.py
@@ -11,7 +11,7 @@ __metaclass__ = type
DOCUMENTATION = '''
---
module: facter
-short_description: Runs the discovery program I(facter) on the remote system
+short_description: Runs the discovery program C(facter) on the remote system
description:
- Runs the C(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning
diff --git a/ansible_collections/community/general/plugins/modules/facter_facts.py b/ansible_collections/community/general/plugins/modules/facter_facts.py
new file mode 100644
index 000000000..abc3f87eb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/facter_facts.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Alexei Znamensky
+# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: facter_facts
+short_description: Runs the discovery program C(facter) on the remote system and return Ansible facts
+version_added: 8.0.0
+description:
+ - Runs the C(facter) discovery program
+ (U(https://github.com/puppetlabs/facter)) on the remote system, returning Ansible facts from the
+ JSON data that can be useful for inventory purposes.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+options:
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
+requirements:
+ - facter
+ - ruby-json
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+- name: Execute facter with no arguments
+ community.general.facter_facts:
+
+- name: Execute facter with arguments
+ community.general.facter_facts:
+ arguments:
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Dictionary with one key C(facter).
+ returned: always
+ type: dict
+ contains:
+ facter:
+ description: Dictionary containing facts discovered in the remote system.
+ returned: always
+ type: dict
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arguments=dict(type='list', elements='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ facter_path = module.get_bin_path(
+ 'facter',
+ opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--json"]
+ if module.params['arguments']:
+ cmd += module.params['arguments']
+
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(ansible_facts=dict(facter=json.loads(out)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/filesize.py b/ansible_collections/community/general/plugins/modules/filesize.py
index b3eb90d61..83de68288 100644
--- a/ansible_collections/community/general/plugins/modules/filesize.py
+++ b/ansible_collections/community/general/plugins/modules/filesize.py
@@ -41,20 +41,20 @@ options:
description:
- Requested size of the file.
- The value is a number (either C(int) or C(float)) optionally followed
- by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or
- C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB),
- and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of
- C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB);
- C(G), C(g) or C(GiB) (= 1024MiB); and so on.
+ by a multiplicative suffix, that can be one of V(B) (bytes), V(KB) or
+ V(kB) (= 1000B), V(MB) or V(mB) (= 1000kB), V(GB) or V(gB) (= 1000MB),
+ and so on for V(T), V(P), V(E), V(Z) and V(Y); or alternatively one of
+ V(K), V(k) or V(KiB) (= 1024B); V(M), V(m) or V(MiB) (= 1024KiB);
+ V(G), V(g) or V(GiB) (= 1024MiB); and so on.
- If the multiplicative suffix is not provided, the value is treated as
- an integer number of blocks of I(blocksize) bytes each (float values
+ an integer number of blocks of O(blocksize) bytes each (float values
are rounded to the closest integer).
- - When the I(size) value is equal to the current file size, does nothing.
- - When the I(size) value is bigger than the current file size, bytes from
- I(source) (if I(sparse) is not C(false)) are appended to the file
+ - When the O(size) value is equal to the current file size, does nothing.
+ - When the O(size) value is bigger than the current file size, bytes from
+ O(source) (if O(sparse) is not V(false)) are appended to the file
without truncating it, in other words, without modifying the existing
bytes of the file.
- - When the I(size) value is smaller than the current file size, it is
+ - When the O(size) value is smaller than the current file size, it is
truncated to the requested value without modifying bytes before this
value.
- That means that a file of any arbitrary size can be grown to any other
@@ -65,24 +65,24 @@ options:
blocksize:
description:
- Size of blocks, in bytes if not followed by a multiplicative suffix.
- - The numeric value (before the unit) C(MUST) be an integer (or a C(float)
+ - The numeric value (before the unit) B(MUST) be an integer (or a C(float)
if it equals an integer).
- If not set, the size of blocks is guessed from the OS and commonly
- results in C(512) or C(4096) bytes, that is used internally by the
- module or when I(size) has no unit.
+ results in V(512) or V(4096) bytes, that is used internally by the
+ module or when O(size) has no unit.
type: raw
source:
description:
- Device or file that provides input data to provision the file.
- - This parameter is ignored when I(sparse=true).
+ - This parameter is ignored when O(sparse=true).
type: path
default: /dev/zero
force:
description:
- Whether or not to overwrite the file if it exists, in other words, to
- truncate it from 0. When C(true), the module is not idempotent, that
- means it always reports I(changed=true).
- - I(force=true) and I(sparse=true) are mutually exclusive.
+ truncate it from 0. When V(true), the module is not idempotent, that
+ means it always reports C(changed=true).
+ - O(force=true) and O(sparse=true) are mutually exclusive.
type: bool
default: false
sparse:
@@ -91,7 +91,7 @@ options:
- This option is effective only on newly created files, or when growing a
file, only for the bytes to append.
- This option is not supported on OSes or filesystems not supporting sparse files.
- - I(force=true) and I(sparse=true) are mutually exclusive.
+ - O(force=true) and O(sparse=true) are mutually exclusive.
type: bool
default: false
unsafe_writes:
@@ -206,7 +206,7 @@ filesize:
type: int
sample: 1024
bytes:
- description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize).
+ description: Size of the file, in bytes, as the product of RV(filesize.blocks) and RV(filesize.blocksize).
type: int
sample: 512000
iec:
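
Editor's note: the size suffixes documented above mix decimal multiples (KB, MB, GB, ...) and binary ones (K/KiB, M/MiB, G/GiB, ...), with suffix-less values counted in blocks of blocksize bytes. An illustrative converter following those semantics (a sketch, not the module's actual parser):

    # Illustrative converter for the suffix semantics described above;
    # not the module's actual parser.
    import re

    DECIMAL = {'B': 1, 'KB': 10**3, 'MB': 10**6, 'GB': 10**9, 'TB': 10**12}
    BINARY = {'K': 2**10, 'KIB': 2**10, 'M': 2**20, 'MIB': 2**20, 'G': 2**30, 'GIB': 2**30}

    def size_to_bytes(size, blocksize=512):
        number, unit = re.match(r'^\s*([0-9.]+)\s*([A-Za-z]*)\s*$', str(size)).groups()
        number = float(number)
        if not unit:
            # No suffix: an integer number of blocks of `blocksize` bytes each.
            return int(round(number)) * blocksize
        factor = DECIMAL.get(unit.upper(), BINARY.get(unit.upper()))
        if factor is None:
            raise ValueError('unknown unit: %s' % unit)
        return int(round(number * factor))

    # size_to_bytes('1MB') == 1000000, size_to_bytes('1MiB') == 1048576,
    # size_to_bytes(4, blocksize=512) == 2048
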
diff --git a/ansible_collections/community/general/plugins/modules/filesystem.py b/ansible_collections/community/general/plugins/modules/filesystem.py
index 0e6b815b4..ec361245b 100644
--- a/ansible_collections/community/general/plugins/modules/filesystem.py
+++ b/ansible_collections/community/general/plugins/modules/filesystem.py
@@ -29,12 +29,12 @@ attributes:
options:
state:
description:
- - If I(state=present), the filesystem is created if it doesn't already
- exist, that is the default behaviour if I(state) is omitted.
- - If I(state=absent), filesystem signatures on I(dev) are wiped if it
+ - If O(state=present), the filesystem is created if it doesn't already
+ exist, that is the default behaviour if O(state) is omitted.
+ - If O(state=absent), filesystem signatures on O(dev) are wiped if it
contains a filesystem (as known by C(blkid)).
- - When I(state=absent), all other options but I(dev) are ignored, and the
- module doesn't fail if the device I(dev) doesn't actually exist.
+ - When O(state=absent), all other options but O(dev) are ignored, and the
+ module does not fail if the device O(dev) doesn't actually exist.
type: str
choices: [ present, absent ]
default: present
@@ -43,7 +43,7 @@ options:
choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ]
description:
- Filesystem type to be created. This option is required with
- I(state=present) (or if I(state) is omitted).
+ O(state=present) (or if O(state) is omitted).
- ufs support has been added in community.general 3.4.0.
type: str
aliases: [type]
@@ -53,50 +53,68 @@ options:
regular file (both).
- When setting Linux-specific filesystem types on FreeBSD, this module
only works when applying to regular files, aka disk images.
- - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support
- a regular file as their target I(dev).
+ - Currently V(lvm) (Linux-only) and V(ufs) (FreeBSD-only) do not support
+ a regular file as their target O(dev).
- Support for character devices on FreeBSD has been added in community.general 3.4.0.
type: path
required: true
aliases: [device]
force:
description:
- - If C(true), allows to create new filesystem on devices that already has filesystem.
+ - If V(true), allows creating a new filesystem on devices that already have a filesystem.
type: bool
default: false
resizefs:
description:
- - If C(true), if the block device and filesystem size differ, grow the filesystem into the space.
+ - If V(true), if the block device and filesystem size differ, grow the filesystem into the space.
- Supported for C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems.
Attempts to resize other filesystem types will fail.
- XFS will only grow if mounted. Currently, the module is based on commands
from C(util-linux) package to perform operations, so resizing of XFS is
not supported on FreeBSD systems.
- vFAT will likely fail if C(fatresize < 1.04).
+ - Mutually exclusive with O(uuid).
type: bool
default: false
opts:
description:
- List of options to be passed to C(mkfs) command.
type: str
+ uuid:
+ description:
+ - Set filesystem's UUID to the given value.
+ - The UUID options specified in O(opts) take precedence over this value.
+ - See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values.
+ - For O(fstype=lvm) the value is ignored, it resets the PV UUID if set.
+ - Supported for O(fstype) being one of C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), or C(xfs).
+ - This is B(not idempotent). Specifying this option will always result in a change.
+ - Mutually exclusive with O(resizefs).
+ type: str
+ version_added: 7.1.0
requirements:
- - Uses specific tools related to the I(fstype) for creating or resizing a
+ - Uses specific tools related to the O(fstype) for creating or resizing a
filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on).
- Uses generic tools mostly related to the Operating System (Linux or
FreeBSD) or available on both, as C(blkid).
- On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required.
notes:
- - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid)
+ - Potential filesystems on O(dev) are checked using C(blkid). In case C(blkid)
is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also
unable to detect a filesystem), this filesystem is overwritten even if
- I(force) is C(false).
+ O(force) is V(false).
- On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide
a C(blkid) command that is compatible with this module. However, these
packages conflict with each other, and only the C(util-linux) package
- provides the command required to not fail when I(state=absent).
+ provides the command required to not fail when O(state=absent).
seealso:
- module: community.general.filesize
- module: ansible.posix.mount
+ - name: xfs_admin(8) manpage for Linux
+ description: Manual page of the GNU/Linux xfs_admin implementation
+ link: https://man7.org/linux/man-pages/man8/xfs_admin.8.html
+ - name: tune2fs(8) manpage for Linux
+ description: Manual page of the GNU/Linux tune2fs implementation
+ link: https://man7.org/linux/man-pages/man8/tune2fs.8.html
'''
EXAMPLES = '''
@@ -120,6 +138,24 @@ EXAMPLES = '''
community.general.filesystem:
dev: /path/to/disk.img
fstype: vfat
+
+- name: Reset an xfs filesystem UUID on /dev/sdb1
+ community.general.filesystem:
+ fstype: xfs
+ dev: /dev/sdb1
+ uuid: generate
+
+- name: Reset an ext4 filesystem UUID on /dev/sdb1
+ community.general.filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ uuid: random
+
+- name: Reset an LVM filesystem (PV) UUID on /dev/sdc
+ community.general.filesystem:
+ fstype: lvm
+ dev: /dev/sdc
+ uuid: random
'''
import os
@@ -178,10 +214,15 @@ class Filesystem(object):
MKFS = None
MKFS_FORCE_FLAGS = []
+ MKFS_SET_UUID_OPTIONS = None
+ MKFS_SET_UUID_EXTRA_OPTIONS = []
INFO = None
GROW = None
GROW_MAX_SPACE_FLAGS = []
GROW_MOUNTPOINT_ONLY = False
+ CHANGE_UUID = None
+ CHANGE_UUID_OPTION = None
+ CHANGE_UUID_OPTION_HAS_ARG = True
LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
@@ -200,13 +241,19 @@ class Filesystem(object):
"""
raise NotImplementedError()
- def create(self, opts, dev):
+ def create(self, opts, dev, uuid=None):
if self.module.check_mode:
return
+ if uuid and self.MKFS_SET_UUID_OPTIONS:
+ if not (set(self.MKFS_SET_UUID_OPTIONS) & set(opts)):
+ opts += [self.MKFS_SET_UUID_OPTIONS[0], uuid] + self.MKFS_SET_UUID_EXTRA_OPTIONS
+
mkfs = self.module.get_bin_path(self.MKFS, required=True)
cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)]
self.module.run_command(cmd, check_rc=True)
+ if uuid and self.CHANGE_UUID and self.MKFS_SET_UUID_OPTIONS is None:
+ self.change_uuid(new_uuid=uuid, dev=dev)
def wipefs(self, dev):
if self.module.check_mode:
@@ -255,11 +302,31 @@ class Filesystem(object):
dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True)
return out
+ def change_uuid_cmd(self, new_uuid, target):
+ """Build and return the UUID change command line as list."""
+ cmdline = [self.module.get_bin_path(self.CHANGE_UUID, required=True)]
+ if self.CHANGE_UUID_OPTION_HAS_ARG:
+ cmdline += [self.CHANGE_UUID_OPTION, new_uuid, target]
+ else:
+ cmdline += [self.CHANGE_UUID_OPTION, target]
+ return cmdline
+
+ def change_uuid(self, new_uuid, dev):
+ """Change filesystem UUID. Returns stdout of used command"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='Changing %s filesystem UUID on device %s' % (self.fstype, dev))
+
+ dummy, out, dummy = self.module.run_command(self.change_uuid_cmd(new_uuid=new_uuid, target=str(dev)), check_rc=True)
+ return out
+
class Ext(Filesystem):
MKFS_FORCE_FLAGS = ['-F']
+ MKFS_SET_UUID_OPTIONS = ['-U']
INFO = 'tune2fs'
GROW = 'resize2fs'
+ CHANGE_UUID = 'tune2fs'
+ CHANGE_UUID_OPTION = "-U"
def get_fs_size(self, dev):
"""Get Block count and Block size and return their product."""
@@ -298,6 +365,8 @@ class XFS(Filesystem):
INFO = 'xfs_info'
GROW = 'xfs_growfs'
GROW_MOUNTPOINT_ONLY = True
+ CHANGE_UUID = "xfs_admin"
+ CHANGE_UUID_OPTION = "-U"
def get_fs_size(self, dev):
"""Get bsize and blocks and return their product."""
@@ -451,8 +520,13 @@ class VFAT(Filesystem):
class LVM(Filesystem):
MKFS = 'pvcreate'
MKFS_FORCE_FLAGS = ['-f']
+ MKFS_SET_UUID_OPTIONS = ['-u', '--uuid']
+ MKFS_SET_UUID_EXTRA_OPTIONS = ['--norestorefile']
INFO = 'pvs'
GROW = 'pvresize'
+ CHANGE_UUID = 'pvchange'
+ CHANGE_UUID_OPTION = '-u'
+ CHANGE_UUID_OPTION_HAS_ARG = False
def get_fs_size(self, dev):
"""Get and return PV size, in bytes."""
@@ -525,10 +599,14 @@ def main():
opts=dict(type='str'),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
+ uuid=dict(type='str', required=False),
),
required_if=[
('state', 'present', ['fstype'])
],
+ mutually_exclusive=[
+ ('resizefs', 'uuid'),
+ ],
supports_check_mode=True,
)
@@ -538,6 +616,7 @@ def main():
opts = module.params['opts']
force = module.params['force']
resizefs = module.params['resizefs']
+ uuid = module.params['uuid']
mkfs_opts = []
if opts is not None:
@@ -576,21 +655,30 @@ def main():
filesystem = klass(module)
+ if uuid and not (filesystem.CHANGE_UUID or filesystem.MKFS_SET_UUID_OPTIONS):
+ module.fail_json(changed=False, msg="module does not support UUID option for this filesystem (%s) yet." % fstype)
+
same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
- if same_fs and not resizefs and not force:
+ if same_fs and not resizefs and not uuid and not force:
module.exit_json(changed=False)
- elif same_fs and resizefs:
- if not filesystem.GROW:
- module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+ elif same_fs:
+ if resizefs:
+ if not filesystem.GROW:
+ module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+ out = filesystem.grow(dev)
+
+ module.exit_json(changed=True, msg=out)
+ elif uuid:
- out = filesystem.grow(dev)
+ out = filesystem.change_uuid(new_uuid=uuid, dev=dev)
- module.exit_json(changed=True, msg=out)
+ module.exit_json(changed=True, msg=out)
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err)
# create fs
- filesystem.create(mkfs_opts, dev)
+ filesystem.create(opts=mkfs_opts, dev=dev, uuid=uuid)
changed = True
elif fs:
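
Editor's note: the new class attributes above (CHANGE_UUID, CHANGE_UUID_OPTION, CHANGE_UUID_OPTION_HAS_ARG) boil down to per-filesystem command lines; a sketch of what change_uuid_cmd() produces for the filesystems wired up in this diff:

    # Sketch of the UUID-change command lines built by change_uuid_cmd() above.
    def change_uuid_cmdline(fstype, new_uuid, dev):
        table = {
            # fstype: (tool, option, option takes the new UUID as an argument)
            'ext4': ('tune2fs', '-U', True),
            'xfs': ('xfs_admin', '-U', True),
            'lvm': ('pvchange', '-u', False),  # pvchange -u resets the PV UUID
        }
        tool, option, has_arg = table[fstype]
        if has_arg:
            return [tool, option, new_uuid, dev]
        return [tool, option, dev]

    # change_uuid_cmdline('xfs', 'generate', '/dev/sdb1') -> ['xfs_admin', '-U', 'generate', '/dev/sdb1']
    # change_uuid_cmdline('lvm', 'random', '/dev/sdc')    -> ['pvchange', '-u', '/dev/sdc']
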
diff --git a/ansible_collections/community/general/plugins/modules/flatpak.py b/ansible_collections/community/general/plugins/modules/flatpak.py
index 40a13736f..80dbabdfa 100644
--- a/ansible_collections/community/general/plugins/modules/flatpak.py
+++ b/ansible_collections/community/general/plugins/modules/flatpak.py
@@ -39,8 +39,8 @@ options:
method:
description:
- The installation method to use.
- - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
- or only for the current C(user).
+ - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system)
+ or only for the current V(user).
type: str
choices: [ system, user ]
default: system
@@ -48,14 +48,14 @@ options:
description:
- The name of the flatpak to manage. To operate on several packages this
can accept a list of packages.
- - When used with I(state=present), I(name) can be specified as a URL to a
+ - When used with O(state=present), O(name) can be specified as a URL to a
C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
- Both C(https://) and C(http://) URLs are supported.
- - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
+ - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote
to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
- - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
+ - When used with O(state=absent), it is recommended to specify the name in the reverse DNS
format.
- - When supplying a URL with I(state=absent), the module will try to match the
+ - When supplying a URL with O(state=absent), the module will try to match the
installed flatpak based on the name of the flatpakref to remove it. However, there is no
guarantee that the names of the flatpakref file and the reverse DNS name of the installed
flatpak do match.
@@ -74,7 +74,7 @@ options:
remote:
description:
- The flatpak remote (repository) to install the flatpak from.
- - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before
+ - By default, V(flathub) is assumed, but you do need to add the flathub flatpak_remote before
you can use this.
- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
type: str
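
Editor's note: roughly, the name/remote/method options described above translate into a flatpak install invocation as sketched below (simplified; the actual module adds further flags and state handling):

    # Simplified sketch of the flatpak install command implied by the options above.
    def flatpak_install_cmd(name, method='system', remote='flathub'):
        cmd = ['flatpak', 'install', '--' + method, '-y']
        if name.startswith(('http://', 'https://')):
            cmd.append(name)          # direct .flatpakref URL
        else:
            cmd += [remote, name]     # reverse-DNS name resolved on the given remote
        return cmd
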
diff --git a/ansible_collections/community/general/plugins/modules/flatpak_remote.py b/ansible_collections/community/general/plugins/modules/flatpak_remote.py
index 9c097c411..a4eb3ea27 100644
--- a/ansible_collections/community/general/plugins/modules/flatpak_remote.py
+++ b/ansible_collections/community/general/plugins/modules/flatpak_remote.py
@@ -18,7 +18,7 @@ description:
- Allows users to add or remove flatpak remotes.
- The flatpak remotes concept is comparable to what is called repositories in other packaging
formats.
- - Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+ - Currently, remote addition is only supported via C(flatpakrepo) file URLs.
- Existing remotes will not be updated.
- See the M(community.general.flatpak) module for managing flatpaks.
author:
@@ -42,26 +42,26 @@ options:
default: flatpak
flatpakrepo_url:
description:
- - The URL to the I(flatpakrepo) file representing the repository remote to add.
- - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
- is added using the specified installation C(method).
- - When used with I(state=absent), this is not required.
- - Required when I(state=present).
+ - The URL to the C(flatpakrepo) file representing the repository remote to add.
+ - When used with O(state=present), the flatpak remote specified under the O(flatpakrepo_url)
+ is added using the specified installation O(method).
+ - When used with O(state=absent), this is not required.
+ - Required when O(state=present).
type: str
method:
description:
- The installation method to use.
- - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
- or only for the current C(user).
+ - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system)
+ or only for the current V(user).
type: str
choices: [ system, user ]
default: system
name:
description:
- The desired name for the flatpak remote to be registered under on the managed host.
- - When used with I(state=present), the remote will be added to the managed host under
- the specified I(name).
- - When used with I(state=absent) the remote with that name will be removed.
+ - When used with O(state=present), the remote will be added to the managed host under
+ the specified O(name).
+ - When used with O(state=absent) the remote with that name will be removed.
type: str
required: true
state:
diff --git a/ansible_collections/community/general/plugins/modules/flowdock.py b/ansible_collections/community/general/plugins/modules/flowdock.py
index c78716ba4..0e8a7461d 100644
--- a/ansible_collections/community/general/plugins/modules/flowdock.py
+++ b/ansible_collections/community/general/plugins/modules/flowdock.py
@@ -11,6 +11,12 @@ __metaclass__ = type
DOCUMENTATION = '''
---
+
+deprecated:
+ removed_in: 9.0.0
+ why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
+ alternative: no known alternative at this point
+
module: flowdock
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
@@ -87,7 +93,7 @@ options:
required: false
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: true
diff --git a/ansible_collections/community/general/plugins/modules/gandi_livedns.py b/ansible_collections/community/general/plugins/modules/gandi_livedns.py
index cc9dd630b..fdb7993a5 100644
--- a/ansible_collections/community/general/plugins/modules/gandi_livedns.py
+++ b/ansible_collections/community/general/plugins/modules/gandi_livedns.py
@@ -44,7 +44,7 @@ options:
ttl:
description:
- The TTL to give the new record.
- - Required when I(state=present).
+ - Required when O(state=present).
type: int
type:
description:
@@ -54,7 +54,7 @@ options:
values:
description:
- The record values.
- - Required when I(state=present).
+ - Required when O(state=present).
type: list
elements: str
domain:
diff --git a/ansible_collections/community/general/plugins/modules/gconftool2.py b/ansible_collections/community/general/plugins/modules/gconftool2.py
index 949e92b30..a40304a16 100644
--- a/ansible_collections/community/general/plugins/modules/gconftool2.py
+++ b/ansible_collections/community/general/plugins/modules/gconftool2.py
@@ -35,20 +35,20 @@ options:
type: str
description:
- Preference keys typically have simple values such as strings,
- integers, or lists of strings and integers. This is ignored if the state
- is "get". See man gconftool-2(1).
+ integers, or lists of strings and integers.
+ This is ignored unless O(state=present). See man gconftool-2(1).
value_type:
type: str
description:
- - The type of value being set. This is ignored if the state is "get".
+ - The type of value being set.
+ This is ignored unless O(state=present). See man gconftool-2(1).
choices: [ bool, float, int, string ]
state:
type: str
description:
- The action to take upon the key/value.
- - State C(get) is deprecated and will be removed in community.general 8.0.0. Please use the module M(community.general.gconftool2_info) instead.
required: true
- choices: [ absent, get, present ]
+ choices: [ absent, present ]
config_source:
type: str
description:
@@ -56,8 +56,8 @@ options:
See man gconftool-2(1).
direct:
description:
- - Access the config database directly, bypassing server. If direct is
- specified then the config_source must be specified as well.
+ - Access the config database directly, bypassing the server. If O(direct) is
+ specified, then O(config_source) must be specified as well.
See man gconftool-2(1).
type: bool
default: false
@@ -73,17 +73,26 @@ EXAMPLES = """
RETURN = '''
key:
- description: The key specified in the module parameters
+ description: The key specified in the module parameters.
returned: success
type: str
sample: /desktop/gnome/interface/font_name
value_type:
- description: The type of the value that was changed
+ description: The type of the value that was changed.
returned: success
type: str
sample: string
value:
- description: The value of the preference key after executing the module
+ description:
+ - The value of the preference key after executing the module, or V(null) if the key is removed.
+ - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that.
+ returned: success
+ type: str
+ sample: "Serif 12"
+ previous_value:
+ description:
+ - The value of the preference key before executing the module.
+ - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that.
returned: success
type: str
sample: "Serif 12"
@@ -95,7 +104,6 @@ from ansible_collections.community.general.plugins.module_utils.gconftool2 impor
class GConftool(StateModuleHelper):
- change_params = ('value', )
diff_params = ('value', )
output_params = ('key', 'value_type')
facts_params = ('key', 'value_type')
@@ -105,13 +113,12 @@ class GConftool(StateModuleHelper):
key=dict(type='str', required=True, no_log=False),
value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
value=dict(type='str'),
- state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
direct=dict(type='bool', default=False),
config_source=dict(type='str'),
),
required_if=[
('state', 'present', ['value', 'value_type']),
- ('state', 'absent', ['value']),
('direct', True, ['config_source']),
],
supports_check_mode=True,
@@ -125,6 +132,7 @@ class GConftool(StateModuleHelper):
self.vars.set('previous_value', self._get(), fact=True)
self.vars.set('value_type', self.vars.value_type)
+ self.vars.set('_value', self.vars.previous_value, output=False, change=True)
self.vars.set_meta('value', initial_value=self.vars.previous_value)
self.vars.set('playbook_value', self.vars.value, fact=True)
@@ -132,27 +140,29 @@ class GConftool(StateModuleHelper):
def process(rc, out, err):
if err and fail_on_err:
self.ansible.fail_json(msg='gconftool-2 failed with error: %s' % (str(err)))
- self.vars.value = out.rstrip()
+ out = out.rstrip()
+ self.vars.value = None if out == "" else out
return self.vars.value
return process
def _get(self):
return self.runner("state key", output_process=self._make_process(False)).run(state="get")
- def state_get(self):
- self.deprecate(
- msg="State 'get' is deprecated. Please use the module community.general.gconftool2_info instead",
- version="8.0.0", collection_name="community.general"
- )
-
def state_absent(self):
with self.runner("state key", output_process=self._make_process(False)) as ctx:
ctx.run()
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
self.vars.set('new_value', None, fact=True)
+ self.vars._value = None
def state_present(self):
with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx:
- self.vars.set('new_value', ctx.run(), fact=True)
+ ctx.run()
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+ self.vars.set('new_value', self._get(), fact=True)
+ self.vars._value = self.vars.new_value
def main():
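
Editor's note: for reference, the states handled above map onto gconftool-2 invocations roughly as follows, mirroring the runner's argument order "direct config_source value_type state key value" (a simplified sketch; the module builds these through its CmdRunner rather than directly):

    # Rough sketch of the gconftool-2 command lines behind the states above.
    def gconftool2_cmd(state, key, value=None, value_type=None, direct=False, config_source=None):
        cmd = ['gconftool-2']
        if direct:
            cmd += ['--direct', '--config-source', config_source]
        if state == 'present':
            cmd += ['--type', value_type, '--set', key, value]
        elif state == 'absent':
            cmd += ['--unset', key]
        else:  # the internal "get" used to read previous_value/new_value
            cmd += ['--get', key]
        return cmd
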
diff --git a/ansible_collections/community/general/plugins/modules/gem.py b/ansible_collections/community/general/plugins/modules/gem.py
index 4bc99d39e..f51e3350d 100644
--- a/ansible_collections/community/general/plugins/modules/gem.py
+++ b/ansible_collections/community/general/plugins/modules/gem.py
@@ -31,7 +31,7 @@ options:
state:
type: str
description:
- - The desired state of the gem. C(latest) ensures that the latest version is installed.
+ - The desired state of the gem. V(latest) ensures that the latest version is installed.
required: false
choices: [present, absent, latest]
default: present
@@ -80,7 +80,7 @@ options:
default: true
description:
- Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2.
- - The default changed from C(false) to C(true) in community.general 6.0.0.
+ - The default changed from V(false) to V(true) in community.general 6.0.0.
version_added: 3.3.0
env_shebang:
description:
diff --git a/ansible_collections/community/general/plugins/modules/gio_mime.py b/ansible_collections/community/general/plugins/modules/gio_mime.py
new file mode 100644
index 000000000..27f90581e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gio_mime.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gio_mime
+author:
+ - "Alexei Znamensky (@russoz)"
+short_description: Set default handler for MIME type, for applications using Gnome GIO
+version_added: 7.5.0
+description:
+ - This module allows configuring the default handler for a specific MIME type, to be used by applications built with the Gnome GIO API.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ mime_type:
+ description:
+ - MIME type for which a default handler will be set.
+ type: str
+ required: true
+ handler:
+ description:
+ - Default handler will be set for the MIME type.
+ type: str
+ required: true
+notes:
+ - This module is a thin wrapper around the C(gio mime) command (and subcommand).
+ - See man gio(1) for more details.
+seealso:
+ - name: GIO Documentation
+ description: Reference documentation for the GIO API.
+ link: https://docs.gtk.org/gio/
+'''
+
+EXAMPLES = """
+- name: Set chrome as the default handler for https
+ community.general.gio_mime:
+ mime_type: x-scheme-handler/https
+ handler: google-chrome.desktop
+ register: result
+"""
+
+RETURN = '''
+ handler:
+ description:
+ - The handler set as default.
+ returned: success
+ type: str
+ sample: google-chrome.desktop
+ stdout:
+ description:
+ - The output of the C(gio) command.
+ returned: success
+ type: str
+ sample: Set google-chrome.desktop as the default for x-scheme-handler/https
+ stderr:
+ description:
+ - The error output of the C(gio) command.
+ returned: failure
+ type: str
+ sample: 'gio: Failed to load info for handler "never-existed.desktop"'
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.gio_mime import gio_mime_runner, gio_mime_get
+
+
+class GioMime(ModuleHelper):
+ output_params = ['handler']
+ module = dict(
+ argument_spec=dict(
+ mime_type=dict(type='str', required=True),
+ handler=dict(type='str', required=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ self.runner = gio_mime_runner(self.module, check_rc=True)
+ self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
+
+ def __run__(self):
+ check_mode_return = (0, 'Module executed in check mode', '')
+ if self.vars.has_changed("handler"):
+ with self.runner.context(args_order=["mime_type", "handler"], check_mode_skip=True, check_mode_return=check_mode_return) as ctx:
+ rc, out, err = ctx.run()
+ self.vars.stdout = out
+ self.vars.stderr = err
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+
+
+def main():
+ GioMime.execute()
+
+
+if __name__ == '__main__':
+ main()
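
Editor's note: the module above is documented as a thin wrapper around the gio mime command; for orientation, the two underlying calls look roughly like this (an illustrative sketch with simplified output parsing, not the collection's gio_mime module_utils):

    # Illustrative sketch of the `gio mime` calls wrapped by the module above.
    import subprocess

    def get_default_handler(mime_type):
        # `gio mime <mime-type>` prints, on its first line, something like:
        #   Default application for "x-scheme-handler/https": google-chrome.desktop
        out = subprocess.run(['gio', 'mime', mime_type],
                             capture_output=True, text=True, check=True).stdout
        first_line = out.splitlines()[0] if out else ''
        return first_line.rsplit(':', 1)[-1].strip() or None

    def set_default_handler(mime_type, handler):
        # `gio mime <mime-type> <handler>` sets the default handler.
        subprocess.run(['gio', 'mime', mime_type, handler], check=True)
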
diff --git a/ansible_collections/community/general/plugins/modules/git_config.py b/ansible_collections/community/general/plugins/modules/git_config.py
index d67312174..a8d2ebe97 100644
--- a/ansible_collections/community/general/plugins/modules/git_config.py
+++ b/ansible_collections/community/general/plugins/modules/git_config.py
@@ -20,7 +20,7 @@ author:
requirements: ['git']
short_description: Read and write git configuration
description:
- - The C(git_config) module changes git configuration by invoking 'git config'.
+ - The M(community.general.git_config) module changes git configuration by invoking C(git config).
This is needed if you do not want to use M(ansible.builtin.template) for the entire git
config file (for example because you need to change just C(user.email) in
/etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or
@@ -35,7 +35,7 @@ attributes:
options:
list_all:
description:
- - List all settings (optionally limited to a given I(scope)).
+ - List all settings (optionally limited to a given O(scope)).
type: bool
default: false
name:
@@ -50,23 +50,23 @@ options:
type: path
file:
description:
- - Path to an adhoc git configuration file to be managed using the C(file) scope.
+ - Path to an adhoc git configuration file to be managed using the V(file) scope.
type: path
version_added: 2.0.0
scope:
description:
- Specify which scope to read/set values from.
- This is required when setting config values.
- - If this is set to C(local), you must also specify the C(repo) parameter.
- - If this is set to C(file), you must also specify the C(file) parameter.
- - It defaults to system only when not using I(list_all)=C(true).
+ - If this is set to V(local), you must also specify the O(repo) parameter.
+ - If this is set to V(file), you must also specify the O(file) parameter.
+ - It defaults to system only when not using O(list_all=true).
choices: [ "file", "local", "global", "system" ]
type: str
state:
description:
- "Indicates the setting should be set/unset.
- This parameter has higher precedence than I(value) parameter:
- when I(state)=absent and I(value) is defined, I(value) is discarded."
+ This parameter has higher precedence than O(value) parameter:
+ when O(state=absent) and O(value) is defined, O(value) is discarded."
choices: [ 'present', 'absent' ]
default: 'present'
type: str
@@ -75,6 +75,16 @@ options:
- When specifying the name of a single setting, supply a value to
set that setting to the given value.
type: str
+ add_mode:
+ description:
+ - Specify if a value should replace the existing value(s) or if the new
+ value should be added alongside other values with the same name.
+ - This option is only relevant when adding/replacing values. If O(state=absent) or
+ values are just read out, this option is not considered.
+ choices: [ "add", "replace-all" ]
+ type: str
+ default: "replace-all"
+ version_added: 8.1.0
'''
EXAMPLES = '''
@@ -118,6 +128,15 @@ EXAMPLES = '''
name: color.ui
value: auto
+- name: Add several options for the same name
+ community.general.git_config:
+ name: push.pushoption
+ value: "{{ item }}"
+ add_mode: add
+ loop:
+ - merge_request.create
+ - merge_request.draft
+
- name: Make etckeeper not complaining when it is invoked by cron
community.general.git_config:
name: user.email
@@ -152,13 +171,13 @@ EXAMPLES = '''
RETURN = '''
---
config_value:
- description: When I(list_all=false) and value is not set, a string containing the value of the setting in name
+ description: When O(list_all=false) and value is not set, a string containing the value of the setting in name
returned: success
type: str
sample: "vim"
config_values:
- description: When I(list_all=true), a dict containing key/value pairs of multiple configuration settings
+ description: When O(list_all=true), a dict containing key/value pairs of multiple configuration settings
returned: success
type: dict
sample:
@@ -178,6 +197,7 @@ def main():
name=dict(type='str'),
repo=dict(type='path'),
file=dict(type='path'),
+ add_mode=dict(required=False, type='str', default='replace-all', choices=['add', 'replace-all']),
scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']),
state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
value=dict(required=False),
@@ -197,94 +217,118 @@ def main():
# Set the locale to C to ensure consistent messages.
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
- if params['name']:
- name = params['name']
- else:
- name = None
+ name = params['name'] or ''
+ unset = params['state'] == 'absent'
+ new_value = params['value'] or ''
+ add_mode = params['add_mode']
- if params['scope']:
- scope = params['scope']
- elif params['list_all']:
- scope = None
- else:
- scope = 'system'
+ scope = determine_scope(params)
+ cwd = determine_cwd(scope, params)
- if params['state'] == 'absent':
- unset = 'unset'
- params['value'] = None
- else:
- unset = None
-
- if params['value']:
- new_value = params['value']
- else:
- new_value = None
+ base_args = [git_path, "config", "--includes"]
- args = [git_path, "config", "--includes"]
- if params['list_all']:
- args.append('-l')
if scope == 'file':
- args.append('-f')
- args.append(params['file'])
+ base_args.append('-f')
+ base_args.append(params['file'])
elif scope:
- args.append("--" + scope)
+ base_args.append("--" + scope)
+
+ list_args = list(base_args)
+
+ if params['list_all']:
+ list_args.append('-l')
+
if name:
- args.append(name)
+ list_args.append("--get-all")
+ list_args.append(name)
- if scope == 'local':
- dir = params['repo']
- elif params['list_all'] and params['repo']:
- # Include local settings from a specific repo when listing all available settings
- dir = params['repo']
- else:
- # Run from root directory to avoid accidentally picking up any local config settings
- dir = "/"
+ (rc, out, err) = module.run_command(list_args, cwd=cwd, expand_user_and_vars=False)
- (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False)
if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
# This just means nothing has been set at the given scope
module.exit_json(changed=False, msg='', config_values={})
elif rc >= 2:
# If the return code is 1, it just means the option hasn't been set yet, which is fine.
- module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args))
+
+ old_values = out.rstrip().splitlines()
if params['list_all']:
- values = out.rstrip().splitlines()
config_values = {}
- for value in values:
+ for value in old_values:
k, v = value.split('=', 1)
config_values[k] = v
module.exit_json(changed=False, msg='', config_values=config_values)
elif not new_value and not unset:
- module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ module.exit_json(changed=False, msg='', config_value=old_values[0] if old_values else '')
elif unset and not out:
module.exit_json(changed=False, msg='no setting to unset')
+ elif new_value in old_values and (len(old_values) == 1 or add_mode == "add"):
+ module.exit_json(changed=False, msg="")
+
+ # Until this point, the git config was just read and in case no change is needed, the module has already exited.
+
+ set_args = list(base_args)
+ if unset:
+ set_args.append("--unset-all")
+ set_args.append(name)
else:
- old_value = out.rstrip()
- if old_value == new_value:
- module.exit_json(changed=False, msg="")
+ set_args.append("--" + add_mode)
+ set_args.append(name)
+ set_args.append(new_value)
if not module.check_mode:
- if unset:
- args.insert(len(args) - 1, "--" + unset)
- cmd = args
- else:
- cmd = args + [new_value]
- (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False)
+ (rc, out, err) = module.run_command(set_args, cwd=cwd, ignore_invalid_cwd=False, expand_user_and_vars=False)
if err:
- module.fail_json(rc=rc, msg=err, cmd=cmd)
+ module.fail_json(rc=rc, msg=err, cmd=set_args)
+
+ if unset:
+ after_values = []
+ elif add_mode == "add":
+ after_values = old_values + [new_value]
+ else:
+ after_values = [new_value]
module.exit_json(
msg='setting changed',
diff=dict(
- before_header=' '.join(args),
- before=old_value + "\n",
- after_header=' '.join(args),
- after=(new_value or '') + "\n"
+ before_header=' '.join(set_args),
+ before=build_diff_value(old_values),
+ after_header=' '.join(set_args),
+ after=build_diff_value(after_values),
),
changed=True
)
+def determine_scope(params):
+ if params['scope']:
+ return params['scope']
+ elif params['list_all']:
+ return ""
+ else:
+ return 'system'
+
+
+def build_diff_value(value):
+ if not value:
+ return "\n"
+ elif len(value) == 1:
+ return value[0] + "\n"
+ else:
+ return value
+
+
+def determine_cwd(scope, params):
+ if scope == 'local':
+ return params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ return params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ return "/"
+
+
if __name__ == '__main__':
main()
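
The refactored git_config flow above first builds read-only arguments (list_args with --get-all or -l) and only afterwards builds the write arguments (set_args with --unset-all or "--" + add_mode). A minimal standalone sketch of that read-then-write split follows; it is not part of the module, and "replace-all" as the non-"add" mode is an assumption based on the "--" + add_mode construction shown in the diff.

# Minimal sketch, outside the module: read all current values for a key, then
# either append a value (--add) or replace every existing one.
import subprocess

def read_values(base_args, name):
    proc = subprocess.run(base_args + ["--get-all", name], capture_output=True, text=True)
    # exit code 1 simply means the key is not set yet
    return proc.stdout.rstrip().splitlines() if proc.returncode == 0 else []

def write_value(base_args, name, value, add_mode="replace-all"):
    subprocess.run(base_args + ["--" + add_mode, name, value], check=True)

base = ["git", "config", "--includes", "--global"]
old = read_values(base, "push.pushoption")
if "merge_request.create" not in old:
    write_value(base, "push.pushoption", "merge_request.create", add_mode="add")
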
diff --git a/ansible_collections/community/general/plugins/modules/git_config_info.py b/ansible_collections/community/general/plugins/modules/git_config_info.py
new file mode 100644
index 000000000..147201fff
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/git_config_info.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Guenther Grill <grill.guenther@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git_config_info
+author:
+ - Guenther Grill (@guenhter)
+version_added: 8.1.0
+requirements: ['git']
+short_description: Read git configuration
+description:
+ - The M(community.general.git_config_info) module reads the git configuration
+ by invoking C(git config).
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ name:
+ description:
+ - The name of the setting to read.
+ - If not provided, all settings will be returned as RV(config_values).
+ type: str
+ path:
+ description:
+ - Path to a git repository or file for reading values from a specific repo.
+ - If O(scope) is V(local), this must point to a repository to read from.
+ - If O(scope) is V(file), this must point to a specific git config file to read from.
+ - Otherwise O(path) is ignored if set.
+ type: path
+ scope:
+ description:
+ - Specify which scope to read values from.
+ - If set to V(global), the global git config is used. O(path) is ignored.
+ - If set to V(system), the system git config is used. O(path) is ignored.
+ - If set to V(local), O(path) must be set to the repo to read from.
+ - If set to V(file), O(path) must be set to the config file to read from.
+ choices: [ "global", "system", "local", "file" ]
+ default: "system"
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Read a system wide config
+ community.general.git_config_info:
+ name: core.editor
+ register: result
+
+- name: Show value of core.editor
+ ansible.builtin.debug:
+ msg: "{{ result.config_value | default('(not set)', true) }}"
+
+- name: Read a global config from ~/.gitconfig
+ community.general.git_config_info:
+ name: alias.remotev
+ scope: global
+
+- name: Read a project specific config
+ community.general.git_config_info:
+ name: color.ui
+ scope: local
+ path: /etc
+
+- name: Read all global values
+ community.general.git_config_info:
+ scope: global
+
+- name: Read all system wide values
+ community.general.git_config_info:
+
+- name: Read all values of a specific file
+ community.general.git_config_info:
+ scope: file
+ path: /etc/gitconfig
+'''
+
+RETURN = '''
+---
+config_value:
+ description: >
+ When O(name) is set, a string containing the value of the setting in name. If O(name) is not set, empty.
+ If a config key such as V(push.pushoption) has more than one entry, only the first one is returned here.
+ returned: success if O(name) is set
+ type: str
+ sample: "vim"
+
+config_values:
+ description:
+ - This is a dictionary mapping a git configuration setting to a list of its values.
+ - When O(name) is not set, all configuration settings are returned here.
+ - When O(name) is set, only the setting specified in O(name) is returned here.
+ If that setting is not set, the key will still be present, and its value will be an empty list.
+ returned: success
+ type: dict
+ sample:
+ core.editor: ["vim"]
+ color.ui: ["auto"]
+ push.pushoption: ["merge_request.create", "merge_request.draft"]
+ alias.remotev: ["remote -v"]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type="str"),
+ path=dict(type="path"),
+ scope=dict(required=False, type="str", default="system", choices=["global", "system", "local", "file"]),
+ ),
+ required_if=[
+ ("scope", "local", ["path"]),
+ ("scope", "file", ["path"]),
+ ],
+ required_one_of=[],
+ supports_check_mode=True,
+ )
+
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ name = module.params["name"]
+ path = module.params["path"]
+ scope = module.params["scope"]
+
+ run_cwd = path if scope == "local" else "/"
+ args = build_args(module, name, path, scope)
+
+ (rc, out, err) = module.run_command(args, cwd=run_cwd, expand_user_and_vars=False)
+
+ if rc == 128 and "unable to read config file" in err:
+ # This just means nothing has been set at the given scope
+ pass
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=" ".join(args))
+
+ output_lines = out.strip("\0").split("\0") if out else []
+
+ if name:
+ first_value = output_lines[0] if output_lines else ""
+ config_values = {name: output_lines}
+ module.exit_json(changed=False, msg="", config_value=first_value, config_values=config_values)
+ else:
+ config_values = text_to_dict(output_lines)
+ module.exit_json(changed=False, msg="", config_value="", config_values=config_values)
+
+
+def build_args(module, name, path, scope):
+ git_path = module.get_bin_path("git", True)
+ args = [git_path, "config", "--includes", "--null", "--" + scope]
+
+ if scope == "file":
+ args.append(path)
+
+ if name:
+ args.extend(["--get-all", name])
+ else:
+ args.append("--list")
+
+ return args
+
+
+def text_to_dict(text_lines):
+ config_values = {}
+ for value in text_lines:
+ k, v = value.split("\n", 1)
+ if k in config_values:
+ config_values[k].append(v)
+ else:
+ config_values[k] = [v]
+ return config_values
+
+
+if __name__ == "__main__":
+ main()
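
As a side note to the new module above: build_args passes --null so that git terminates each entry with a NUL byte and separates the key from its value with a newline, which is exactly what text_to_dict relies on. A minimal sketch of that parsing over hypothetical sample output (the sample data below is illustrative only):

sample = "core.editor\nvim\0push.pushoption\nmerge_request.create\0push.pushoption\nmerge_request.draft\0"

config_values = {}
for entry in sample.strip("\0").split("\0"):
    key, value = entry.split("\n", 1)  # key and value are newline-separated
    config_values.setdefault(key, []).append(value)

print(config_values)
# {'core.editor': ['vim'], 'push.pushoption': ['merge_request.create', 'merge_request.draft']}
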
diff --git a/ansible_collections/community/general/plugins/modules/github_deploy_key.py b/ansible_collections/community/general/plugins/modules/github_deploy_key.py
index 322650bf7..ae90e04c9 100644
--- a/ansible_collections/community/general/plugins/modules/github_deploy_key.py
+++ b/ansible_collections/community/general/plugins/modules/github_deploy_key.py
@@ -58,7 +58,7 @@ options:
type: str
read_only:
description:
- - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
+ - If V(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
type: bool
default: true
state:
@@ -69,7 +69,7 @@ options:
type: str
force:
description:
- - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
+ - If V(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
type: bool
default: false
username:
@@ -78,15 +78,15 @@ options:
type: str
password:
description:
- - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
+ - The password to authenticate with. Alternatively, a personal access token can be used instead of O(username) and O(password) combination.
type: str
token:
description:
- - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
+ - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with O(password).
type: str
otp:
description:
- - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password).
+ - The 6 digit One Time Password for 2-Factor Authentication. Required together with O(username) and O(password).
type: int
notes:
- "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
@@ -227,7 +227,7 @@ class GithubDeployKey(object):
yield self.module.from_json(resp.read())
links = {}
- for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]):
+ for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", '')):
links[y] = x
url = links.get('next')
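
The one-line change above only makes the pagination loop tolerate responses without a Link header (a single page of results); the regex itself is unchanged. A minimal sketch of what it extracts from a typical GitHub Link header (the URLs below are illustrative):

from re import findall

link_header = ('<https://api.github.com/repos/o/r/keys?page=2>; rel="next", '
               '<https://api.github.com/repos/o/r/keys?page=5>; rel="last"')

links = {rel: url for url, rel in findall(r'<([^>]+)>;\s*rel="(\w+)"', link_header)}
print(links.get("next"))  # https://api.github.com/repos/o/r/keys?page=2
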
diff --git a/ansible_collections/community/general/plugins/modules/github_key.py b/ansible_collections/community/general/plugins/modules/github_key.py
index 683a963a7..fa3a0a01f 100644
--- a/ansible_collections/community/general/plugins/modules/github_key.py
+++ b/ansible_collections/community/general/plugins/modules/github_key.py
@@ -34,7 +34,7 @@ options:
type: str
pubkey:
description:
- - SSH public key value. Required when I(state=present).
+ - SSH public key value. Required when O(state=present).
type: str
state:
description:
@@ -44,9 +44,9 @@ options:
type: str
force:
description:
- - The default is C(true), which will replace the existing remote key
- if it's different than C(pubkey). If C(false), the key will only be
- set if no key with the given I(name) exists.
+ - The default is V(true), which will replace the existing remote key
+ if it is different than O(pubkey). If V(false), the key will only be
+ set if no key with the given O(name) exists.
type: bool
default: true
@@ -82,8 +82,14 @@ EXAMPLES = '''
name: Access Key for Some Machine
token: '{{ github_access_token }}'
pubkey: '{{ ssh_pub_key.stdout }}'
-'''
+# Alternatively, a single task can read the public key from a file on the controller
+- name: Authorize key with GitHub
+ community.general.github_key:
+ name: Access Key for Some Machine
+ token: '{{ github_access_token }}'
+ pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}"
+'''
import json
import re
diff --git a/ansible_collections/community/general/plugins/modules/github_release.py b/ansible_collections/community/general/plugins/modules/github_release.py
index 3ddd6c882..d8ee155b8 100644
--- a/ansible_collections/community/general/plugins/modules/github_release.py
+++ b/ansible_collections/community/general/plugins/modules/github_release.py
@@ -25,7 +25,7 @@ attributes:
options:
token:
description:
- - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
+ - GitHub Personal Access Token for authenticating. Mutually exclusive with O(password).
type: str
user:
description:
@@ -34,7 +34,7 @@ options:
required: true
password:
description:
- - The GitHub account password for the user. Mutually exclusive with C(token).
+ - The GitHub account password for the user. Mutually exclusive with O(token).
type: str
repo:
description:
@@ -49,7 +49,7 @@ options:
choices: [ 'latest_release', 'create_release' ]
tag:
description:
- - Tag name when creating a release. Required when using action is set to C(create_release).
+ - Tag name when creating a release. Required when using O(action=create_release).
type: str
target:
description:
@@ -94,7 +94,7 @@ EXAMPLES = '''
repo: testrepo
action: latest_release
-- name: Get latest release of test repo using username and password. Ansible 2.4.
+- name: Get latest release of test repo using username and password
community.general.github_release:
user: testuser
password: secret123
diff --git a/ansible_collections/community/general/plugins/modules/github_repo.py b/ansible_collections/community/general/plugins/modules/github_repo.py
index 97076c58a..f02ad30ac 100644
--- a/ansible_collections/community/general/plugins/modules/github_repo.py
+++ b/ansible_collections/community/general/plugins/modules/github_repo.py
@@ -15,7 +15,7 @@ short_description: Manage your repositories on Github
version_added: 2.2.0
description:
- Manages Github repositories using PyGithub library.
- - Authentication can be done with I(access_token) or with I(username) and I(password).
+ - Authentication can be done with O(access_token) or with O(username) and O(password).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -27,19 +27,19 @@ options:
username:
description:
- Username used for authentication.
- - This is only needed when not using I(access_token).
+ - This is only needed when not using O(access_token).
type: str
required: false
password:
description:
- Password used for authentication.
- - This is only needed when not using I(access_token).
+ - This is only needed when not using O(access_token).
type: str
required: false
access_token:
description:
- Token parameter for authentication.
- - This is only needed when not using I(username) and I(password).
+ - This is only needed when not using O(username) and O(password).
type: str
required: false
name:
@@ -50,17 +50,17 @@ options:
description:
description:
- Description for the repository.
- - Defaults to empty if I(force_defaults=true), which is the default in this module.
- - Defaults to empty if I(force_defaults=false) when creating a new repository.
- - This is only used when I(state) is C(present).
+ - Defaults to empty if O(force_defaults=true), which is the default in this module.
+ - Defaults to empty if O(force_defaults=false) when creating a new repository.
+ - This is only used when O(state) is V(present).
type: str
required: false
private:
description:
- Whether the repository should be private or not.
- - Defaults to C(false) if I(force_defaults=true), which is the default in this module.
- - Defaults to C(false) if I(force_defaults=false) when creating a new repository.
- - This is only used when I(state) is C(present).
+ - Defaults to V(false) if O(force_defaults=true), which is the default in this module.
+ - Defaults to V(false) if O(force_defaults=false) when creating a new repository.
+ - This is only used when O(state=present).
type: bool
required: false
state:
@@ -73,7 +73,7 @@ options:
organization:
description:
- Organization for the repository.
- - When I(state) is C(present), the repository will be created in the current user profile.
+ - When O(state=present), the repository will be created in the current user profile.
type: str
required: false
api_url:
@@ -84,8 +84,8 @@ options:
version_added: "3.5.0"
force_defaults:
description:
- - Overwrite current I(description) and I(private) attributes with defaults if set to C(true), which currently is the default.
- - The default for this option will be deprecated in a future version of this collection, and eventually change to C(false).
+ - Overwrite current O(description) and O(private) attributes with defaults if set to V(true), which currently is the default.
+ - The default for this option will be deprecated in a future version of this collection, and eventually change to V(false).
type: bool
default: true
required: false
@@ -125,7 +125,7 @@ EXAMPLES = '''
RETURN = '''
repo:
description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).
- returned: success and I(state) is C(present)
+ returned: success and O(state=present)
type: dict
'''
diff --git a/ansible_collections/community/general/plugins/modules/github_webhook.py b/ansible_collections/community/general/plugins/modules/github_webhook.py
index d47b7a82f..11b115750 100644
--- a/ansible_collections/community/general/plugins/modules/github_webhook.py
+++ b/ansible_collections/community/general/plugins/modules/github_webhook.py
@@ -61,7 +61,7 @@ options:
- >
A list of GitHub events the hook is triggered for. Events are listed at
U(https://developer.github.com/v3/activity/events/types/). Required
- unless C(state) is C(absent)
+ unless O(state=absent)
required: false
type: list
elements: str
diff --git a/ansible_collections/community/general/plugins/modules/github_webhook_info.py b/ansible_collections/community/general/plugins/modules/github_webhook_info.py
index a6f7c3e52..dcad02a36 100644
--- a/ansible_collections/community/general/plugins/modules/github_webhook_info.py
+++ b/ansible_collections/community/general/plugins/modules/github_webhook_info.py
@@ -14,7 +14,6 @@ module: github_webhook_info
short_description: Query information about GitHub webhooks
description:
- "Query information about GitHub webhooks"
- - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
requirements:
- "PyGithub >= 1.3.5"
extends_documentation_fragment:
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_branch.py
index d7eecb33f..623c25644 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_branch.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_branch.py
@@ -16,7 +16,6 @@ description:
author:
- paytroff (@paytroff)
requirements:
- - python >= 2.7
- python-gitlab >= 2.3.0
extends_documentation_fragment:
- community.general.auth_basic
@@ -49,7 +48,7 @@ options:
ref_branch:
description:
- Reference branch to create from.
- - This must be specified if I(state=present).
+ - This must be specified if O(state=present).
type: str
'''
@@ -84,7 +83,7 @@ from ansible.module_utils.api import basic_auth_argument_spec
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, gitlab_authentication, gitlab
)
@@ -144,7 +143,9 @@ def main():
],
supports_check_mode=False
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
project = module.params['project']
branch = module.params['branch']
@@ -156,7 +157,6 @@ def main():
module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
" Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
- gitlab_instance = gitlab_authentication(module)
this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance)
this_branch = this_gitlab.get_branch(branch)
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py b/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
index 27cb01f87..7c0ff06b7 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
@@ -20,7 +20,6 @@ author:
- Marcus Watkins (@marwatk)
- Guillaume Martinez (@Lunik)
requirements:
- - python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- community.general.auth_basic
@@ -56,8 +55,8 @@ options:
default: false
state:
description:
- - When C(present) the deploy key added to the project if it doesn't exist.
- - When C(absent) it will be removed from the project if it exists.
+ - When V(present) the deploy key is added to the project if it does not exist.
+ - When V(absent) it will be removed from the project if it exists.
default: present
type: str
choices: [ "present", "absent" ]
@@ -121,7 +120,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, find_project, gitlab_authentication, gitlab, list_all_kwargs
)
@@ -209,8 +208,7 @@ class GitLabDeployKey(object):
@param key_title Title of the key
'''
def find_deploy_key(self, project, key_title):
- deploy_keys = project.keys.list(all=True)
- for deploy_key in deploy_keys:
+ for deploy_key in project.keys.list(**list_all_kwargs):
if (deploy_key.title == key_title):
return deploy_key
@@ -261,7 +259,9 @@ def main():
],
supports_check_mode=True,
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
state = module.params['state']
project_identifier = module.params['project']
@@ -269,8 +269,6 @@ def main():
key_keyfile = module.params['key']
key_can_push = module.params['can_push']
- gitlab_instance = gitlab_authentication(module)
-
gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
project = find_project(gitlab_instance, project_identifier)
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group.py b/ansible_collections/community/general/plugins/modules/gitlab_group.py
index 4de1ffc5f..3d57b1852 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_group.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group.py
@@ -20,7 +20,6 @@ author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
requirements:
- - python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- community.general.auth_basic
@@ -94,6 +93,13 @@ options:
- This option is only used on creation, not for updates.
type: path
version_added: 4.2.0
+ force_delete:
+ description:
+ - Force delete the group even if there are projects in it.
+ - Used only when O(state=absent).
+ type: bool
+ default: false
+ version_added: 7.5.0
'''
EXAMPLES = '''
@@ -101,7 +107,6 @@ EXAMPLES = '''
community.general.gitlab_group:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
- validate_certs: false
name: my_first_group
state: absent
@@ -171,7 +176,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, find_group, gitlab_authentication, gitlab
)
@@ -279,12 +284,18 @@ class GitLabGroup(object):
return (changed, group)
- def delete_group(self):
+ '''
+ @param force To delete even if projects inside
+ '''
+ def delete_group(self, force=False):
group = self.group_object
- if len(group.projects.list(all=False)) >= 1:
+ if not force and len(group.projects.list(all=False)) >= 1:
self._module.fail_json(
- msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
+ msg=("There are still projects in this group. "
+ "These need to be moved or deleted before this group can be removed. "
+ "Set 'force_delete' to 'true' to force deletion of existing projects.")
+ )
else:
if self._module.check_mode:
return True
@@ -295,7 +306,7 @@ class GitLabGroup(object):
self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
'''
- @param name Name of the groupe
+ @param name Name of the group
@param full_path Complete path of the Group including parent group path. <parent_path>/<group_path>
'''
def exists_group(self, project_identifier):
@@ -322,6 +333,7 @@ def main():
subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']),
require_two_factor_authentication=dict(type='bool'),
avatar_path=dict(type='path'),
+ force_delete=dict(type='bool', default=False),
))
module = AnsibleModule(
@@ -341,7 +353,9 @@ def main():
],
supports_check_mode=True,
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
group_name = module.params['name']
group_path = module.params['path']
@@ -354,8 +368,7 @@ def main():
subgroup_creation_level = module.params['subgroup_creation_level']
require_two_factor_authentication = module.params['require_two_factor_authentication']
avatar_path = module.params['avatar_path']
-
- gitlab_instance = gitlab_authentication(module)
+ force_delete = module.params['force_delete']
# Define default group_path based on group_name
if group_path is None:
@@ -375,7 +388,7 @@ def main():
if state == 'absent':
if group_exists:
- gitlab_group.delete_group()
+ gitlab_group.delete_group(force=force_delete)
module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
else:
module.exit_json(changed=False, msg="Group deleted or does not exists")
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_access_token.py b/ansible_collections/community/general/plugins/modules/gitlab_group_access_token.py
new file mode 100644
index 000000000..85bba205d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group_access_token.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr)
+# Based on code:
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Copyright (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_group_access_token
+short_description: Manages GitLab group access tokens
+version_added: 8.4.0
+description:
+ - Creates and revokes group access tokens.
+author:
+ - Zoran Krleza (@pixslx)
+requirements:
+ - python-gitlab >= 3.1.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+notes:
+ - Access tokens cannot be changed. If a parameter needs to be changed, an access token has to be recreated.
+ Whether tokens will be recreated is controlled by the O(recreate) option, which defaults to V(never).
+ - The token string is contained in the result only when the access token is created or recreated. It cannot be fetched afterwards.
+ - Token matching is done by comparing the O(name) option.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ group:
+ description:
+ - ID or full path of group in the form of group/subgroup.
+ required: true
+ type: str
+ name:
+ description:
+ - Access token's name.
+ required: true
+ type: str
+ scopes:
+ description:
+ - Scope of the access token.
+ required: true
+ type: list
+ elements: str
+ aliases: ["scope"]
+ choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", "ai_features", "k8s_proxy"]
+ access_level:
+ description:
+ - Access level of the access token.
+ type: str
+ default: maintainer
+ choices: ["guest", "reporter", "developer", "maintainer", "owner"]
+ expires_at:
+ description:
+ - Expiration date of the access token in C(YYYY-MM-DD) format.
+ - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date.
+ type: str
+ required: true
+ recreate:
+ description:
+ - Whether the access token will be recreated if it already exists.
+ - When V(never) the token will never be recreated.
+ - When V(always) the token will always be recreated.
+ - When V(state_change) the token will be recreated if there is a difference between desired state and actual state.
+ type: str
+ choices: ["never", "always", "state_change"]
+ default: never
+ state:
+ description:
+ - When V(present) the access token will be added to the group if it does not exist.
+ - When V(absent) it will be removed from the group if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = r'''
+- name: "Creating a group access token"
+ community.general.gitlab_group_access_token:
+ api_url: https://gitlab.example.com/
+ api_token: "somegitlabapitoken"
+ group: "my_group/my_subgroup"
+ name: "group_token"
+ expires_at: "2024-12-31"
+ access_level: developer
+ scopes:
+ - api
+ - read_api
+ - read_repository
+ - write_repository
+ state: present
+
+- name: "Revoking a group access token"
+ community.general.gitlab_group_access_token:
+ api_url: https://gitlab.example.com/
+ api_token: "somegitlabapitoken"
+ group: "my_group/my_group"
+ name: "group_token"
+ expires_at: "2024-12-31"
+ scopes:
+ - api
+ - read_api
+ - read_repository
+ - write_repository
+ state: absent
+
+- name: "Change (recreate) existing token if its actual state is different than desired state"
+ community.general.gitlab_group_access_token:
+ api_url: https://gitlab.example.com/
+ api_token: "somegitlabapitoken"
+ group: "my_group/my_group"
+ name: "group_token"
+ expires_at: "2024-12-31"
+ scopes:
+ - api
+ - read_api
+ - read_repository
+ - write_repository
+ recreate: state_change
+ state: present
+'''
+
+RETURN = r'''
+access_token:
+ description:
+ - API object.
+ - Only contains the value of the token if the token was created or recreated.
+ returned: success and O(state=present)
+ type: dict
+'''
+
+from datetime import datetime
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, find_group, gitlab_authentication, gitlab
+)
+
+ACCESS_LEVELS = dict(guest=10, reporter=20, developer=30, maintainer=40, owner=50)
+
+
+class GitLabGroupAccessToken(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.access_token_object = None
+
+ '''
+ @param project Project Object
+ @param group Group Object
+ @param arguments Attributes of the access_token
+ '''
+ def create_access_token(self, group, arguments):
+ changed = False
+ if self._module.check_mode:
+ return True
+
+ try:
+ self.access_token_object = group.access_tokens.create(arguments)
+ changed = True
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create access token: %s " % to_native(e))
+
+ return changed
+
+ '''
+ @param project Project object
+ @param group Group Object
+ @param name of the access token
+ '''
+ def find_access_token(self, group, name):
+ access_tokens = group.access_tokens.list(all=True)
+ for access_token in access_tokens:
+ if (access_token.name == name):
+ self.access_token_object = access_token
+ return False
+ return False
+
+ def revoke_access_token(self):
+ if self._module.check_mode:
+ return True
+
+ changed = False
+ try:
+ self.access_token_object.delete()
+ changed = True
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e))
+
+ return changed
+
+ def access_tokens_equal(self):
+ if self.access_token_object.name != self._module.params['name']:
+ return False
+ if self.access_token_object.scopes != self._module.params['scopes']:
+ return False
+ if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]:
+ return False
+ if self.access_token_object.expires_at != self._module.params['expires_at']:
+ return False
+ return True
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ group=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ scopes=dict(type='list',
+ required=True,
+ aliases=['scope'],
+ elements='str',
+ choices=['api',
+ 'read_api',
+ 'read_registry',
+ 'write_registry',
+ 'read_repository',
+ 'write_repository',
+ 'create_runner',
+ 'ai_features',
+ 'k8s_proxy']),
+ access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ expires_at=dict(type='str', required=True),
+ recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change'])
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ group_identifier = module.params['group']
+ name = module.params['name']
+ scopes = module.params['scopes']
+ access_level_str = module.params['access_level']
+ expires_at = module.params['expires_at']
+ recreate = module.params['recreate']
+
+ access_level = ACCESS_LEVELS[access_level_str]
+
+ try:
+ datetime.strptime(expires_at, '%Y-%m-%d')
+ except ValueError:
+ module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD")
+
+ gitlab_instance = gitlab_authentication(module)
+
+ gitlab_access_token = GitLabGroupAccessToken(module, gitlab_instance)
+
+ group = find_group(gitlab_instance, group_identifier)
+ if group is None:
+ module.fail_json(msg="Failed to create access token: group %s does not exist" % group_identifier)
+
+ gitlab_access_token_exists = False
+ gitlab_access_token.find_access_token(group, name)
+ if gitlab_access_token.access_token_object is not None:
+ gitlab_access_token_exists = True
+
+ if state == 'absent':
+ if gitlab_access_token_exists:
+ gitlab_access_token.revoke_access_token()
+ module.exit_json(changed=True, msg="Successfully deleted access token %s" % name)
+ else:
+ module.exit_json(changed=False, msg="Access token does not exist")
+
+ if state == 'present':
+ if gitlab_access_token_exists:
+ if gitlab_access_token.access_tokens_equal():
+ if recreate == 'always':
+ gitlab_access_token.revoke_access_token()
+ gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+ module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs)
+ else:
+ module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs)
+ else:
+ if recreate == 'never':
+ module.fail_json(msg="Access token already exists and its state is different. It can not be updated without recreating.")
+ else:
+ gitlab_access_token.revoke_access_token()
+ gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+ module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs)
+ else:
+ gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+ module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
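
For readers of the new module above, the recreate handling in main() boils down to a small decision table: tokens can never be updated in place, only revoked and recreated. A minimal sketch of that logic, separate from the module itself:

def decide(token_exists, states_equal, recreate):
    # mirrors the branches in main(): create, keep, recreate, or fail
    if not token_exists:
        return "create"
    if states_equal:
        return "recreate" if recreate == "always" else "keep"
    # actual state differs from the desired state
    return "fail" if recreate == "never" else "recreate"

assert decide(False, False, "never") == "create"
assert decide(True, True, "never") == "keep"
assert decide(True, False, "never") == "fail"
assert decide(True, False, "state_change") == "recreate"
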
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_members.py b/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
index 66298e882..ca82891e3 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
@@ -40,22 +40,22 @@ options:
gitlab_user:
description:
- A username or a list of usernames to add to/remove from the GitLab group.
- - Mutually exclusive with I(gitlab_users_access).
+ - Mutually exclusive with O(gitlab_users_access).
type: list
elements: str
access_level:
description:
- The access level for the user.
- - Required if I(state=present), user state is set to present.
- - Mutually exclusive with I(gitlab_users_access).
+ - Required if O(state=present), user state is set to present.
+ - Mutually exclusive with O(gitlab_users_access).
type: str
choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
gitlab_users_access:
description:
- Provide a list of user to access level mappings.
- Every dictionary in this list specifies a user (by username) and the access level the user should have.
- - Mutually exclusive with I(gitlab_user) and I(access_level).
- - Use together with I(purge_users) to remove all users not specified here from the group.
+ - Mutually exclusive with O(gitlab_user) and O(access_level).
+ - Use together with O(purge_users) to remove all users not specified here from the group.
type: list
elements: dict
suboptions:
@@ -66,7 +66,7 @@ options:
access_level:
description:
- The access level for the user.
- - Required if I(state=present), user state is set to present.
+ - Required if O(state=present), user state is set to present.
type: str
choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
required: true
@@ -74,16 +74,16 @@ options:
state:
description:
- State of the member in the group.
- - On C(present), it adds a user to a GitLab group.
- - On C(absent), it removes a user from a GitLab group.
+ - On V(present), it adds a user to a GitLab group.
+ - On V(absent), it removes a user from a GitLab group.
choices: ['present', 'absent']
default: 'present'
type: str
purge_users:
description:
+ - Adds/removes users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list.
+ - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list.
If omitted do not purge orphaned members.
- - Is only used when I(state=present).
+ - Is only used when O(state=present).
type: list
elements: str
choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
@@ -160,7 +160,7 @@ from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs
)
@@ -171,16 +171,20 @@ class GitLabGroup(object):
# get user id if the user exists
def get_user_id(self, gitlab_user):
- user_exists = self._gitlab.users.list(username=gitlab_user, all=True)
- if user_exists:
- return user_exists[0].id
+ return next(
+ (u.id for u in self._gitlab.users.list(username=gitlab_user, **list_all_kwargs)),
+ None
+ )
# get group id if group exists
def get_group_id(self, gitlab_group):
- groups = self._gitlab.groups.list(search=gitlab_group, all=True)
- for group in groups:
- if group.full_path == gitlab_group:
- return group.id
+ return next(
+ (
+ g.id for g in self._gitlab.groups.list(search=gitlab_group, **list_all_kwargs)
+ if g.full_path == gitlab_group
+ ),
+ None
+ )
# get all members in a group
def get_members_in_a_group(self, gitlab_group_id):
@@ -273,14 +277,16 @@ def main():
],
supports_check_mode=True,
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gl = gitlab_authentication(module)
access_level_int = {
- 'guest': gitlab.GUEST_ACCESS,
- 'reporter': gitlab.REPORTER_ACCESS,
- 'developer': gitlab.DEVELOPER_ACCESS,
- 'maintainer': gitlab.MAINTAINER_ACCESS,
- 'owner': gitlab.OWNER_ACCESS,
+ 'guest': gitlab.const.GUEST_ACCESS,
+ 'reporter': gitlab.const.REPORTER_ACCESS,
+ 'developer': gitlab.const.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.const.MAINTAINER_ACCESS,
+ 'owner': gitlab.const.OWNER_ACCESS,
}
gitlab_group = module.params['gitlab_group']
@@ -291,9 +297,6 @@ def main():
if purge_users:
purge_users = [access_level_int[level] for level in purge_users]
- # connect to gitlab server
- gl = gitlab_authentication(module)
-
group = GitLabGroup(module, gl)
gitlab_group_id = group.get_group_id(gitlab_group)
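
The rewritten get_user_id and get_group_id above use the next()-over-a-generator idiom: return the first matching id, or None when nothing matches, without materializing the whole listing. A minimal standalone sketch of the same pattern with illustrative data:

users = [{"username": "alice", "id": 1}, {"username": "bob", "id": 2}]

def get_user_id(username):
    # first match wins; None signals "not found" to the caller
    return next((u["id"] for u in users if u["username"] == username), None)

assert get_user_id("bob") == 2
assert get_user_id("carol") is None
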
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
index c7befe123..32e5aaa90 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
@@ -17,11 +17,10 @@ description:
- Creates a group variable if it does not exist.
- When a group variable does exist, its value will be updated when the values are different.
- Variables which are untouched in the playbook, but are not untouched in the GitLab group,
- they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+ they stay untouched (O(purge=false)) or will be deleted (O(purge=true)).
author:
- Florent Madiot (@scodeman)
requirements:
- - python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- community.general.auth_basic
@@ -48,20 +47,21 @@ options:
type: str
purge:
description:
- - When set to C(true), delete all variables which are not untouched in the task.
+ - When set to V(true), delete all variables which are not untouched in the task.
default: false
type: bool
vars:
description:
- - When the list element is a simple key-value pair, set masked and protected to false.
- - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
- have full control about whether a value should be masked, protected or both.
+ - When the list element is a simple key-value pair, masked, raw and protected will be set to false.
+ - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can
+ have full control over whether a value should be masked, raw, protected, or any combination of these.
- Support for group variables requires GitLab >= 9.5.
- Support for environment_scope requires GitLab Premium >= 13.11.
- Support for protected values requires GitLab >= 9.3.
- Support for masked values requires GitLab >= 11.10.
- - A I(value) must be a string or a number.
- - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - Support for raw values requires GitLab >= 15.7.
+ - A C(value) must be a string or a number.
+ - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file).
- When a value is masked, it must be in Base64 and have a length of at least 8 characters.
See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
default: {}
@@ -70,7 +70,7 @@ options:
version_added: 4.5.0
description:
- A list of dictionaries that represents CI/CD variables.
- - This modules works internal with this sructure, even if the older I(vars) parameter is used.
+ - This module works internally with this structure, even if the older O(vars) parameter is used.
default: []
type: list
elements: dict
@@ -83,21 +83,28 @@ options:
value:
description:
- The variable value.
- - Required when I(state=present).
+ - Required when O(state=present).
type: str
masked:
description:
- - Wether variable value is masked or not.
+ - Whether variable value is masked or not.
type: bool
default: false
protected:
description:
- - Wether variable value is protected or not.
+ - Whether variable value is protected or not.
type: bool
default: false
+ raw:
+ description:
+ - Whether variable value is raw or not.
+ - Support for raw values requires GitLab >= 15.7.
+ type: bool
+ default: false
+ version_added: '7.4.0'
variable_type:
description:
- - Wether a variable is an environment variable (C(env_var)) or a file (C(file)).
+ - Whether a variable is an environment variable (V(env_var)) or a file (V(file)).
type: str
choices: [ "env_var", "file" ]
default: env_var
@@ -126,6 +133,38 @@ EXAMPLES = r'''
variable_type: env_var
environment_scope: production
+- name: Set or update some CI/CD variables with raw value
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ raw: true
+ variable_type: env_var
+ environment_scope: '*'
+
+- name: Set or update some CI/CD variables with expandable value
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: '$MY_OTHER_VARIABLE'
+ masked: true
+ protected: true
+ raw: false
+ variable_type: env_var
+ environment_scope: '*'
+
- name: Delete one variable
community.general.gitlab_group_variable:
api_url: https://gitlab.com
@@ -166,52 +205,12 @@ group_variable:
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.api import basic_auth_argument_spec
-from ansible.module_utils.six import string_types
-from ansible.module_utils.six import integer_types
-
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables
+ auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables,
+ list_all_kwargs
)
-def vars_to_variables(vars, module):
- # transform old vars to new variables structure
- variables = list()
- for item, value in vars.items():
- if (isinstance(value, string_types) or
- isinstance(value, (integer_types, float))):
- variables.append(
- {
- "name": item,
- "value": str(value),
- "masked": False,
- "protected": False,
- "variable_type": "env_var",
- }
- )
-
- elif isinstance(value, dict):
- new_item = {"name": item, "value": value.get('value')}
-
- new_item = {
- "name": item,
- "value": value.get('value'),
- "masked": value.get('masked'),
- "protected": value.get('protected'),
- "variable_type": value.get('variable_type'),
- }
-
- if value.get('environment_scope'):
- new_item['environment_scope'] = value.get('environment_scope')
-
- variables.append(new_item)
-
- else:
- module.fail_json(msg="value must be of type string, integer, float or dict")
-
- return variables
-
-
class GitlabGroupVariables(object):
def __init__(self, module, gitlab_instance):
@@ -223,14 +222,7 @@ class GitlabGroupVariables(object):
return self.repo.groups.get(group_name)
def list_all_group_variables(self):
- page_nb = 1
- variables = []
- vars_page = self.group.variables.list(page=page_nb)
- while len(vars_page) > 0:
- variables += vars_page
- page_nb += 1
- vars_page = self.group.variables.list(page=page_nb)
- return variables
+ return list(self.group.variables.list(**list_all_kwargs))
def create_variable(self, var_obj):
if self._module.check_mode:
@@ -240,6 +232,7 @@ class GitlabGroupVariables(object):
"value": var_obj.get('value'),
"masked": var_obj.get('masked'),
"protected": var_obj.get('protected'),
+ "raw": var_obj.get('raw'),
"variable_type": var_obj.get('variable_type'),
}
if var_obj.get('environment_scope') is not None:
@@ -308,6 +301,8 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module):
item['value'] = str(item.get('value'))
if item.get('protected') is None:
item['protected'] = False
+ if item.get('raw') is None:
+ item['raw'] = False
if item.get('masked') is None:
item['masked'] = False
if item.get('environment_scope') is None:
@@ -379,11 +374,14 @@ def main():
group=dict(type='str', required=True),
purge=dict(type='bool', required=False, default=False),
vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ # please note: whenever changing the variables dict, also change module_utils/gitlab.py's
+ # KNOWN dict in filter_returned_variables, or bad things will happen
variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
protected=dict(type='bool', default=False),
+ raw=dict(type='bool', default=False),
environment_scope=dict(type='str', default='*'),
variable_type=dict(type='str', default='env_var', choices=["env_var", "file"])
)),
@@ -408,7 +406,9 @@ def main():
],
supports_check_mode=True
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
purge = module.params['purge']
var_list = module.params['vars']
@@ -423,8 +423,6 @@ def main():
if any(x['value'] is None for x in variables):
module.fail_json(msg='value parameter is required in state present')
- gitlab_instance = gitlab_authentication(module)
-
this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance)
changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module)
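
The vars_to_variables helper deleted above now lives in module_utils/gitlab.py; the documented behaviour is that a plain key/value pair becomes an env_var entry with masked/protected/raw defaulting to false, while a dict entry keeps whatever flags the user set. A minimal sketch of that normalization (the shared helper may differ in detail):

def normalize(vars_dict):
    variables = []
    for name, value in vars_dict.items():
        if isinstance(value, dict):
            entry = {"name": name, "value": value.get("value")}
            for key in ("masked", "protected", "raw", "variable_type", "environment_scope"):
                if value.get(key) is not None:
                    entry[key] = value[key]
        else:
            entry = {"name": name, "value": str(value), "masked": False,
                     "protected": False, "raw": False, "variable_type": "env_var"}
        variables.append(entry)
    return variables

print(normalize({"ACCESS_KEY_ID": "abc123",
                 "SECRET_ACCESS_KEY": {"value": "3214cbad", "masked": True, "raw": True}}))
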
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_hook.py b/ansible_collections/community/general/plugins/modules/gitlab_hook.py
index adf90eb7b..58781d182 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_hook.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_hook.py
@@ -21,7 +21,6 @@ author:
- Marcus Watkins (@marwatk)
- Guillaume Martinez (@Lunik)
requirements:
- - python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- community.general.auth_basic
@@ -47,8 +46,8 @@ options:
type: str
state:
description:
- - When C(present) the hook will be updated to match the input or created if it doesn't exist.
- - When C(absent) hook will be deleted if it exists.
+ - When V(present) the hook will be updated to match the input or created if it doesn't exist.
+ - When V(absent) the hook will be deleted if it exists.
default: present
type: str
choices: [ "present", "absent" ]
@@ -98,6 +97,11 @@ options:
- Trigger hook on wiki events.
type: bool
default: false
+ releases_events:
+ description:
+ - Trigger hook on release events.
+ type: bool
+ version_added: '8.4.0'
hook_validate_certs:
description:
- Whether GitLab will do SSL verification when triggering the hook.
@@ -123,7 +127,6 @@ EXAMPLES = '''
state: present
push_events: true
tag_push_events: true
- hook_validate_certs: false
token: "my-super-secret-token-that-my-ci-server-will-check"
- name: "Delete the previous hook"
@@ -171,7 +174,7 @@ from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, find_project, gitlab_authentication, ensure_gitlab_package
+ auth_argument_spec, find_project, gitlab_authentication, list_all_kwargs
)
@@ -203,6 +206,7 @@ class GitLabHook(object):
'job_events': options['job_events'],
'pipeline_events': options['pipeline_events'],
'wiki_page_events': options['wiki_page_events'],
+ 'releases_events': options['releases_events'],
'enable_ssl_verification': options['enable_ssl_verification'],
'token': options['token'],
})
@@ -218,6 +222,7 @@ class GitLabHook(object):
'job_events': options['job_events'],
'pipeline_events': options['pipeline_events'],
'wiki_page_events': options['wiki_page_events'],
+ 'releases_events': options['releases_events'],
'enable_ssl_verification': options['enable_ssl_verification'],
'token': options['token'],
})
@@ -266,8 +271,7 @@ class GitLabHook(object):
@param hook_url Url to call on event
'''
def find_hook(self, project, hook_url):
- hooks = project.hooks.list(all=True)
- for hook in hooks:
+ for hook in project.hooks.list(**list_all_kwargs):
if (hook.url == hook_url):
return hook
@@ -304,6 +308,7 @@ def main():
job_events=dict(type='bool', default=False),
pipeline_events=dict(type='bool', default=False),
wiki_page_events=dict(type='bool', default=False),
+ releases_events=dict(type='bool', default=None),
hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
token=dict(type='str', no_log=True),
))
@@ -325,7 +330,9 @@ def main():
],
supports_check_mode=True,
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
state = module.params['state']
project_identifier = module.params['project']
@@ -339,11 +346,10 @@ def main():
job_events = module.params['job_events']
pipeline_events = module.params['pipeline_events']
wiki_page_events = module.params['wiki_page_events']
+ releases_events = module.params['releases_events']
enable_ssl_verification = module.params['hook_validate_certs']
hook_token = module.params['token']
- gitlab_instance = gitlab_authentication(module)
-
gitlab_hook = GitLabHook(module, gitlab_instance)
project = find_project(gitlab_instance, project_identifier)
@@ -371,6 +377,7 @@ def main():
"job_events": job_events,
"pipeline_events": pipeline_events,
"wiki_page_events": wiki_page_events,
+ "releases_events": releases_events,
"enable_ssl_verification": enable_ssl_verification,
"token": hook_token,
}):
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_instance_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_instance_variable.py
new file mode 100644
index 000000000..cc2d812ca
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_instance_variable.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Benedikt Braunger (bebr@adm.ku.dk)
+# Based on code:
+# Copyright (c) 2020, Florent Madiot (scodeman@scode.io)
+# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_instance_variable
+short_description: Creates, updates, or deletes GitLab instance variables
+version_added: 7.1.0
+description:
+ - Creates an instance variable if it does not exist.
+ - When an instance variable does exist, its value will be updated if the values are different.
+ - Support for instance variables requires GitLab >= 13.0.
+ - Variables which are not mentioned in the module's options, but are present on the GitLab instance,
+ will either stay (O(purge=false)) or will be deleted (O(purge=true)).
+author:
+ - Benedikt Braunger (@benibr)
+requirements:
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete instance variable.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ purge:
+ description:
+ - When set to V(true), delete all variables which are not mentioned in the task.
+ default: false
+ type: bool
+ variables:
+ description:
+ - A list of dictionaries that represents CI/CD variables.
+ default: []
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the variable.
+ type: str
+ required: true
+ value:
+ description:
+ - The variable value.
+ - Required when O(state=present).
+ type: str
+ masked:
+ description:
+ - Whether variable value is masked or not.
+ type: bool
+ default: false
+ protected:
+ description:
+ - Whether variable value is protected or not.
+ type: bool
+ default: false
+ variable_type:
+ description:
+ - Whether a variable is an environment variable (V(env_var)) or a file (V(file)).
+ type: str
+ choices: [ "env_var", "file" ]
+ default: env_var
+'''
+
+
+EXAMPLES = r'''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_instance_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ purge: false
+ variables:
+ - name: ACCESS_KEY_ID
+ value: abc1312cba
+ - name: SECRET_ACCESS_KEY
+ value: 1337
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_instance_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ state: absent
+ variables:
+ - name: ACCESS_KEY_ID
+'''
+
+RETURN = r'''
+instance_variable:
+ description: Four lists of the variable names which were added, updated, removed, or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ updated:
+      description: A list of pre-existing variables whose values have been updated.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, filter_returned_variables,
+ list_all_kwargs
+)
+
+
+class GitlabInstanceVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.instance = gitlab_instance
+ self._module = module
+
+ def list_all_instance_variables(self):
+ return list(self.instance.variables.list(**list_all_kwargs))
+
+ def create_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+ var = {
+ "key": var_obj.get('key'),
+ "value": var_obj.get('value'),
+ "masked": var_obj.get('masked'),
+ "protected": var_obj.get('protected'),
+ "variable_type": var_obj.get('variable_type'),
+ }
+
+ self.instance.variables.create(var)
+ return True
+
+ def update_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+ self.delete_variable(var_obj)
+ self.create_variable(var_obj)
+ return True
+
+ def delete_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+ self.instance.variables.delete(var_obj.get('key'))
+ return True
+
+
+def compare(requested_variables, existing_variables, state):
+    # this comparison was inherited from a previous version and is more or less buggy;
+    # it is not strictly necessary and might result in more/other bugs,
+    # but it is kept because it is only relevant for check mode.
+    # the logic represents state 'present' without purge; all other cases can be derived from that:
+    # untouched => requested variable is equal to an existing one
+    # updated => key exists, but other fields differ
+    # added => key does not exist yet
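+    # illustrative example of the intended semantics (see the caveat above): if the
+    # server already has {'key': 'ACCESS_KEY_ID', 'value': 'old'} and the task requests
+    # {'key': 'ACCESS_KEY_ID', 'value': 'new'}, the variable counts as 'updated';
+    # an exact match counts as 'untouched', and an unknown key counts as 'added'.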
+ untouched = list()
+ updated = list()
+ added = list()
+
+ if state == 'present':
+ existing_key_scope_vars = list()
+ for item in existing_variables:
+ existing_key_scope_vars.append({'key': item.get('key')})
+
+ for var in requested_variables:
+ if var in existing_variables:
+ untouched.append(var)
+ else:
+ compare_item = {'key': var.get('name')}
+ if compare_item in existing_key_scope_vars:
+ updated.append(var)
+ else:
+ added.append(var)
+
+ return untouched, updated, added
+
+
+def native_python_main(this_gitlab, purge, requested_variables, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_instance_variables()
+ before = [x.attributes for x in gitlab_keys]
+
+ existing_variables = filter_returned_variables(gitlab_keys)
+
+ for item in requested_variables:
+ item['key'] = item.pop('name')
+ item['value'] = str(item.get('value'))
+ if item.get('protected') is None:
+ item['protected'] = False
+ if item.get('masked') is None:
+ item['masked'] = False
+ if item.get('variable_type') is None:
+ item['variable_type'] = 'env_var'
+
+ if module.check_mode:
+ untouched, updated, added = compare(requested_variables, existing_variables, state)
+
+ if state == 'present':
+ add_or_update = [x for x in requested_variables if x not in existing_variables]
+ for item in add_or_update:
+ try:
+ if this_gitlab.create_variable(item):
+ return_value['added'].append(item)
+
+ except Exception:
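+                # creating a variable whose key already exists raises an exception;
+                # fall back to updating the existing variable instead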
+ if this_gitlab.update_variable(item):
+ return_value['updated'].append(item)
+
+ if purge:
+ # refetch and filter
+ gitlab_keys = this_gitlab.list_all_instance_variables()
+ existing_variables = filter_returned_variables(gitlab_keys)
+
+ remove = [x for x in existing_variables if x not in requested_variables]
+ for item in remove:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ elif state == 'absent':
+        # the value does not matter when removing variables;
+        # the key is sufficient
+ for item in existing_variables:
+ item.pop('value')
+ item.pop('variable_type')
+ for item in requested_variables:
+ item.pop('value')
+ item.pop('variable_type')
+
+ if not purge:
+ remove_requested = [x for x in requested_variables if x in existing_variables]
+ for item in remove_requested:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ else:
+ for item in existing_variables:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ if module.check_mode:
+ return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched)
+
+ if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0:
+ change = True
+
+ gitlab_keys = this_gitlab.list_all_instance_variables()
+ after = [x.attributes for x in gitlab_keys]
+
+ return change, return_value, before, after
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ purge=dict(type='bool', required=False, default=False),
+ variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str', no_log=True),
+ masked=dict(type='bool', default=False),
+ protected=dict(type='bool', default=False),
+ variable_type=dict(type='str', default='env_var', choices=["env_var", "file"])
+ )),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True
+ )
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
+
+ purge = module.params['purge']
+ state = module.params['state']
+
+ variables = module.params['variables']
+
+ if state == 'present':
+ if any(x['value'] is None for x in variables):
+ module.fail_json(msg='value parameter is required in state present')
+
+ this_gitlab = GitlabInstanceVariables(module=module, gitlab_instance=gitlab_instance)
+
+ changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module)
+
+ # postprocessing
+ for item in after:
+ item['name'] = item.pop('key')
+ for item in before:
+ item['name'] = item.pop('key')
+
+ untouched_key_name = 'key'
+ if not module.check_mode:
+ untouched_key_name = 'name'
+ raw_return_value['untouched'] = [x for x in before if x in after]
+
+ added = [x.get('key') for x in raw_return_value['added']]
+ updated = [x.get('key') for x in raw_return_value['updated']]
+ removed = [x.get('key') for x in raw_return_value['removed']]
+ untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']]
+ return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+ module.exit_json(changed=changed, instance_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_issue.py b/ansible_collections/community/general/plugins/modules/gitlab_issue.py
new file mode 100644
index 000000000..6d95bf6cf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_issue.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com)
+# Based on code:
+# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be)
+# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_issue
+short_description: Create, update, or delete GitLab issues
+version_added: '8.1.0'
+description:
+ - Creates an issue if it does not exist.
+ - When an issue does exist, it will be updated if the provided parameters are different.
+ - When an issue does exist and O(state=absent), the issue will be deleted.
+ - When multiple issues are detected, the task fails.
+ - Existing issues are matched based on O(title) and O(state_filter) filters.
+author:
+ - zvaraondrej (@zvaraondrej)
+requirements:
+ - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ assignee_ids:
+ description:
+      - A list of assignee usernames, omitting the V(@) character.
+ - Set to an empty array to unassign all assignees.
+ type: list
+ elements: str
+ description:
+ description:
+ - A description of the issue.
+      - Gets overridden by the content of the file specified in O(description_path), if found.
+ type: str
+ description_path:
+ description:
+      - A path to a file containing the issue's description.
+      - Accepts Markdown formatted files.
+ type: path
+ issue_type:
+ description:
+ - Type of the issue.
+ default: issue
+ type: str
+ choices: ["issue", "incident", "test_case"]
+ labels:
+ description:
+ - A list of label names.
+ - Set to an empty array to remove all labels.
+ type: list
+ elements: str
+ milestone_search:
+ description:
+ - The name of the milestone.
+ - Set to empty string to unassign milestone.
+ type: str
+ milestone_group_id:
+ description:
+      - The path or numeric ID of the group hosting the desired milestone.
+ type: str
+ project:
+ description:
+ - The path or name of the project.
+ required: true
+ type: str
+ state:
+ description:
+ - Create or delete issue.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ state_filter:
+ description:
+ - Filter specifying state of issues while searching.
+ type: str
+ choices: ["opened", "closed"]
+ default: opened
+ title:
+ description:
+ - A title for the issue. The title is used as a unique identifier to ensure idempotency.
+ type: str
+ required: true
+'''
+
+
+EXAMPLES = '''
+- name: Create Issue
+ community.general.gitlab_issue:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ title: "Ansible demo Issue"
+ description: "Demo Issue description"
+ labels:
+ - Ansible
+ - Demo
+ assignee_ids:
+ - testassignee
+ state_filter: "opened"
+ state: present
+
+- name: Delete Issue
+ community.general.gitlab_issue:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ title: "Ansible demo Issue"
+ state_filter: "opened"
+ state: absent
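+
+# Illustrative sketch (not part of the original examples): assigning a milestone
+# requires both milestone_search and milestone_group_id (they are required together).
+# "Version 1.0" and "group1" are placeholder values.
+- name: Create Issue with a milestone
+  community.general.gitlab_issue:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    title: "Ansible demo Issue"
+    milestone_search: "Version 1.0"
+    milestone_group_id: "group1"
+    state: present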
+'''
+
+RETURN = r'''
+msg:
+ description: Success or failure message.
+ returned: always
+ type: str
+ sample: "Success"
+
+issue:
+ description: API object.
+ returned: success
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.common.text.converters import to_native, to_text
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab, find_project, find_group
+)
+
+
+class GitlabIssue(object):
+
+ def __init__(self, module, project, gitlab_instance):
+ self._gitlab = gitlab_instance
+ self._module = module
+ self.project = project
+
+ '''
+ @param milestone_id Title of the milestone
+ '''
+ def get_milestone(self, milestone_id, group):
+ milestones = []
+ try:
+ milestones = group.milestones.list(search=milestone_id)
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to list the Milestones: %s" % to_native(e))
+
+ if len(milestones) > 1:
+ self._module.fail_json(msg="Multiple Milestones matched search criteria.")
+ if len(milestones) < 1:
+ self._module.fail_json(msg="No Milestones matched search criteria.")
+ if len(milestones) == 1:
+ try:
+ return group.milestones.get(id=milestones[0].id)
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to get the Milestones: %s" % to_native(e))
+
+ '''
+ @param title Title of the Issue
+ @param state_filter Issue's state to filter on
+ '''
+ def get_issue(self, title, state_filter):
+ issues = []
+ try:
+ issues = self.project.issues.list(query_parameters={"search": title, "in": "title", "state": state_filter})
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to list the Issues: %s" % to_native(e))
+
+ if len(issues) > 1:
+ self._module.fail_json(msg="Multiple Issues matched search criteria.")
+ if len(issues) == 1:
+ try:
+ return self.project.issues.get(id=issues[0].iid)
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to get the Issue: %s" % to_native(e))
+
+ '''
+ @param username Name of the user
+ '''
+ def get_user(self, username):
+ users = []
+ try:
+ users = [user for user in self.project.users.list(username=username, all=True) if user.username == username]
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to list the users: %s" % to_native(e))
+
+ if len(users) > 1:
+ self._module.fail_json(msg="Multiple Users matched search criteria.")
+ elif len(users) < 1:
+ self._module.fail_json(msg="No User matched search criteria.")
+ else:
+ return users[0]
+
+ '''
+ @param users List of usernames
+ '''
+ def get_user_ids(self, users):
+ return [self.get_user(user).id for user in users]
+
+ '''
+ @param options Options of the Issue
+ '''
+ def create_issue(self, options):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created Issue '%s'." % options["title"])
+
+ try:
+ return self.project.issues.create(options)
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to create Issue: %s " % to_native(e))
+
+ '''
+ @param issue Issue object to delete
+ '''
+ def delete_issue(self, issue):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully deleted Issue '%s'." % issue["title"])
+
+ try:
+ return issue.delete()
+ except gitlab.exceptions.GitlabDeleteError as e:
+ self._module.fail_json(msg="Failed to delete Issue: '%s'." % to_native(e))
+
+ '''
+ @param issue Issue object to update
+ @param options Options of the Issue
+ '''
+ def update_issue(self, issue, options):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully updated Issue '%s'." % issue["title"])
+
+ try:
+ return self.project.issues.update(issue.iid, options)
+ except gitlab.exceptions.GitlabUpdateError as e:
+ self._module.fail_json(msg="Failed to update Issue %s." % to_native(e))
+
+ '''
+ @param issue Issue object to evaluate
+ @param options New options to update Issue with
+ '''
+ def issue_has_changed(self, issue, options):
+ for key, value in options.items():
+ if value is not None:
+
+ if key == 'milestone_id':
+ old_milestone = getattr(issue, 'milestone')['id'] if getattr(issue, 'milestone') else ""
+ if options[key] != old_milestone:
+ return True
+ elif key == 'assignee_ids':
+ if options[key] != sorted([user["id"] for user in getattr(issue, 'assignees')]):
+ return True
+
+ elif key == 'labels':
+ if options[key] != sorted(getattr(issue, key)):
+ return True
+
+ elif getattr(issue, key) != value:
+ return True
+
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ assignee_ids=dict(type='list', elements='str', required=False),
+ description=dict(type='str', required=False),
+ description_path=dict(type='path', required=False),
+ issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"], required=False),
+ labels=dict(type='list', elements='str', required=False),
+ milestone_search=dict(type='str', required=False),
+ milestone_group_id=dict(type='str', required=False),
+ project=dict(type='str', required=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ state_filter=dict(type='str', default="opened", choices=["opened", "closed"]),
+ title=dict(type='str', required=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['description', 'description_path'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ['milestone_search', 'milestone_group_id'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True
+ )
+
+ assignee_ids = module.params['assignee_ids']
+ description = module.params['description']
+ description_path = module.params['description_path']
+ issue_type = module.params['issue_type']
+ labels = module.params['labels']
+ milestone_id = module.params['milestone_search']
+ milestone_group_id = module.params['milestone_group_id']
+ project = module.params['project']
+ state = module.params['state']
+ state_filter = module.params['state_filter']
+ title = module.params['title']
+
+ gitlab_version = gitlab.__version__
+ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
+ module.fail_json(msg="community.general.gitlab_issue requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
+ " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
+
+ this_project = find_project(gitlab_instance, project)
+ if this_project is None:
+ module.fail_json(msg="Failed to get the project: %s" % project)
+
+ this_gitlab = GitlabIssue(module=module, project=this_project, gitlab_instance=gitlab_instance)
+
+ if milestone_id and milestone_group_id:
+ this_group = find_group(gitlab_instance, milestone_group_id)
+ if this_group is None:
+ module.fail_json(msg="Failed to get the group: %s" % milestone_group_id)
+
+ milestone_id = this_gitlab.get_milestone(milestone_id, this_group).id
+
+ this_issue = this_gitlab.get_issue(title, state_filter)
+
+ if state == "present":
+ if description_path:
+ try:
+ with open(description_path, 'rb') as f:
+ description = to_text(f.read(), errors='surrogate_or_strict')
+ except IOError as e:
+ module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e))
+
+ # sorting necessary in order to properly detect changes, as we don't want to get false positive
+ # results due to differences in ids ordering;
+ assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids)) if assignee_ids else assignee_ids
+ labels = sorted(labels) if labels else labels
+
+ options = {
+ "title": title,
+ "description": description,
+ "labels": labels,
+ "issue_type": issue_type,
+ "milestone_id": milestone_id,
+ "assignee_ids": assignee_ids,
+ }
+
+ if not this_issue:
+ issue = this_gitlab.create_issue(options)
+ module.exit_json(
+ changed=True, msg="Created Issue '{t}'.".format(t=title),
+ issue=issue.asdict()
+ )
+ else:
+ if this_gitlab.issue_has_changed(this_issue, options):
+ issue = this_gitlab.update_issue(this_issue, options)
+ module.exit_json(
+ changed=True, msg="Updated Issue '{t}'.".format(t=title),
+ issue=issue
+ )
+ else:
+ module.exit_json(
+ changed=False, msg="Issue '{t}' already exists".format(t=title),
+ issue=this_issue.asdict()
+ )
+ elif state == "absent":
+ if not this_issue:
+ module.exit_json(changed=False, msg="Issue '{t}' does not exist or has already been deleted.".format(t=title))
+ else:
+ issue = this_gitlab.delete_issue(this_issue)
+ module.exit_json(
+ changed=True, msg="Issue '{t}' deleted.".format(t=title),
+ issue=issue
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_label.py b/ansible_collections/community/general/plugins/modules/gitlab_label.py
new file mode 100644
index 000000000..f2c8393f2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_label.py
@@ -0,0 +1,500 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_label
+short_description: Creates/updates/deletes GitLab Labels belonging to project or group
+version_added: 8.3.0
+description:
+ - When a label does not exist, it will be created.
+ - When a label does exist, its value will be updated when the values are different.
+ - Labels can be purged.
+author:
+ - "Gabriele Pongelli (@gpongelli)"
+requirements:
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete project or group label.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ purge:
+ description:
+ - When set to V(true), delete all labels which are not mentioned in the task.
+ default: false
+ type: bool
+ required: false
+ project:
+ description:
+ - The path and name of the project. Either this or O(group) is required.
+ required: false
+ type: str
+ group:
+ description:
+ - The path of the group. Either this or O(project) is required.
+ required: false
+ type: str
+ labels:
+ description:
+      - A list of dictionaries that represents the GitLab project's or group's labels.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ suboptions:
+ name:
+ description:
+ - The name of the label.
+ type: str
+ required: true
+ color:
+ description:
+ - The color of the label.
+ - Required when O(state=present).
+ type: str
+ priority:
+ description:
+ - Integer value to give priority to the label.
+ type: int
+ required: false
+ default: null
+ description:
+ description:
+ - Label's description.
+ type: str
+ default: null
+ new_name:
+ description:
+ - Optional field to change label's name.
+ type: str
+ default: null
+'''
+
+
+EXAMPLES = '''
+# the same tasks can be executed for a group instead of a project
+- name: Create one Label
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ labels:
+ - name: label_one
+ color: "#123456"
+ state: present
+
+- name: Create many group labels
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: "group1"
+ labels:
+ - name: label_one
+ color: "#123456"
+ description: this is a label
+ priority: 20
+ - name: label_two
+ color: "#554422"
+ state: present
+
+- name: Create many project labels
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ labels:
+ - name: label_one
+ color: "#123456"
+ description: this is a label
+ priority: 20
+ - name: label_two
+ color: "#554422"
+ state: present
+
+- name: Set or update some labels
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ labels:
+ - name: label_one
+ color: "#224488"
+ state: present
+
+- name: Add label in check mode
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ labels:
+ - name: label_one
+ color: "#224488"
+ check_mode: true
+
+- name: Delete Label
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ labels:
+ - name: label_one
+ state: absent
+
+- name: Change Label name
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ labels:
+ - name: label_one
+ new_name: label_two
+    state: present
+
+- name: Purge all labels
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ purge: true
+
+- name: Delete many labels
+ community.general.gitlab_label:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ state: absent
+ labels:
+ - name: label-abc123
+ - name: label-two
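+
+# Illustrative sketch (not part of the original examples): the module returns four
+# lists (added, updated, removed, untouched) which can be inspected after registering
+# the result; 'gitlab_label_result' is a placeholder variable name.
+- name: Create a label and register the result
+  community.general.gitlab_label:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    labels:
+      - name: label_one
+        color: "#123456"
+    state: present
+  register: gitlab_label_result
+
+- name: Show which labels were added
+  ansible.builtin.debug:
+    var: gitlab_label_result.labels.added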
+'''
+
+RETURN = '''
+labels:
+  description: Four lists of the labels which were added, updated, removed, or left untouched.
+ returned: success
+ type: dict
+ contains:
+ added:
+ description: A list of labels which were created.
+ returned: always
+ type: list
+ sample: ['abcd', 'label-one']
+ untouched:
+ description: A list of labels which exist.
+ returned: always
+ type: list
+ sample: ['defg', 'new-label']
+ removed:
+ description: A list of labels which were deleted.
+ returned: always
+ type: list
+ sample: ['defg', 'new-label']
+ updated:
+      description: A list of pre-existing labels whose values have been updated.
+ returned: always
+ type: list
+ sample: ['defg', 'new-label']
+labels_obj:
+ description: API object.
+ returned: success
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project, gitlab
+)
+
+
+class GitlabLabels(object):
+
+ def __init__(self, module, gitlab_instance, group_id, project_id):
+ self._gitlab = gitlab_instance
+ self.gitlab_object = group_id if group_id else project_id
+ self.is_group_label = True if group_id else False
+ self._module = module
+
+ def list_all_labels(self):
+ page_nb = 1
+ labels = []
+ vars_page = self.gitlab_object.labels.list(page=page_nb)
+ while len(vars_page) > 0:
+ labels += vars_page
+ page_nb += 1
+ vars_page = self.gitlab_object.labels.list(page=page_nb)
+ return labels
+
+ def create_label(self, var_obj):
+ if self._module.check_mode:
+ return True, True
+
+ var = {
+ "name": var_obj.get('name'),
+ "color": var_obj.get('color'),
+ }
+
+ if var_obj.get('description') is not None:
+ var["description"] = var_obj.get('description')
+
+ if var_obj.get('priority') is not None:
+ var["priority"] = var_obj.get('priority')
+
+ _obj = self.gitlab_object.labels.create(var)
+ return True, _obj.asdict()
+
+ def update_label(self, var_obj):
+ if self._module.check_mode:
+ return True, True
+ _label = self.gitlab_object.labels.get(var_obj.get('name'))
+
+ if var_obj.get('new_name') is not None:
+ _label.new_name = var_obj.get('new_name')
+
+ if var_obj.get('description') is not None:
+ _label.description = var_obj.get('description')
+ if var_obj.get('priority') is not None:
+ _label.priority = var_obj.get('priority')
+
+ # save returns None
+ _label.save()
+ return True, _label.asdict()
+
+ def delete_label(self, var_obj):
+ if self._module.check_mode:
+ return True, True
+ _label = self.gitlab_object.labels.get(var_obj.get('name'))
+ # delete returns None
+ _label.delete()
+ return True, _label.asdict()
+
+
+def compare(requested_labels, existing_labels, state):
+    # this comparison was inherited from a previous version and is more or less buggy;
+    # it is not strictly necessary and might result in more/other bugs,
+    # but it is kept because it is only relevant for check mode.
+    # the logic represents state 'present' without purge; all other cases can be derived from that:
+    # untouched => requested label is equal to an existing one
+    # updated => name exists, but other fields differ
+    # added => name does not exist yet
+ untouched = list()
+ updated = list()
+ added = list()
+
+ if state == 'present':
+ _existing_labels = list()
+ for item in existing_labels:
+ _existing_labels.append({'name': item.get('name')})
+
+ for var in requested_labels:
+ if var in existing_labels:
+ untouched.append(var)
+ else:
+ compare_item = {'name': var.get('name')}
+ if compare_item in _existing_labels:
+ updated.append(var)
+ else:
+ added.append(var)
+
+ return untouched, updated, added
+
+
+def native_python_main(this_gitlab, purge, requested_labels, state, module):
+ change = False
+ return_value = dict(added=[], updated=[], removed=[], untouched=[])
+ return_obj = dict(added=[], updated=[], removed=[])
+
+ labels_before = [x.asdict() for x in this_gitlab.list_all_labels()]
+
+ # filter out and enrich before compare
+ for item in requested_labels:
+ # add defaults when not present
+ if item.get('description') is None:
+ item['description'] = ""
+ if item.get('new_name') is None:
+ item['new_name'] = None
+ if item.get('priority') is None:
+ item['priority'] = None
+
+ # group label does not have priority, removing for comparison
+ if this_gitlab.is_group_label:
+ item.pop('priority')
+
+ for item in labels_before:
+ # remove field only from server
+ item.pop('id')
+ item.pop('description_html')
+ item.pop('text_color')
+ item.pop('subscribed')
+ # field present only when it's a project's label
+ if 'is_project_label' in item:
+ item.pop('is_project_label')
+ item['new_name'] = None
+
+ if state == 'present':
+ add_or_update = [x for x in requested_labels if x not in labels_before]
+ for item in add_or_update:
+ try:
+ _rv, _obj = this_gitlab.create_label(item)
+ if _rv:
+ return_value['added'].append(item)
+ return_obj['added'].append(_obj)
+ except Exception:
+                # create raises an exception when the label already exists; fall back to updating it
+ _rv, _obj = this_gitlab.update_label(item)
+ if _rv:
+ return_value['updated'].append(item)
+ return_obj['updated'].append(_obj)
+
+ if purge:
+ # re-fetch
+ _labels = this_gitlab.list_all_labels()
+
+ for item in labels_before:
+ _rv, _obj = this_gitlab.delete_label(item)
+ if _rv:
+ return_value['removed'].append(item)
+ return_obj['removed'].append(_obj)
+
+ elif state == 'absent':
+ if not purge:
+ _label_names_requested = [x['name'] for x in requested_labels]
+ remove_requested = [x for x in labels_before if x['name'] in _label_names_requested]
+ for item in remove_requested:
+ _rv, _obj = this_gitlab.delete_label(item)
+ if _rv:
+ return_value['removed'].append(item)
+ return_obj['removed'].append(_obj)
+ else:
+ for item in labels_before:
+ _rv, _obj = this_gitlab.delete_label(item)
+ if _rv:
+ return_value['removed'].append(item)
+ return_obj['removed'].append(_obj)
+
+ if module.check_mode:
+ _untouched, _updated, _added = compare(requested_labels, labels_before, state)
+ return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched)
+
+ if any(return_value[x] for x in ['added', 'removed', 'updated']):
+ change = True
+
+ labels_after = [x.asdict() for x in this_gitlab.list_all_labels()]
+
+ return change, return_value, labels_before, labels_after, return_obj
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ project=dict(type='str', required=False, default=None),
+ group=dict(type='str', required=False, default=None),
+ purge=dict(type='bool', required=False, default=False),
+ labels=dict(type='list', elements='dict', required=False, default=list(),
+ options=dict(
+ name=dict(type='str', required=True),
+ color=dict(type='str', required=False),
+ description=dict(type='str', required=False),
+ priority=dict(type='int', required=False),
+ new_name=dict(type='str', required=False),)
+ ),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['project', 'group'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'],
+ ['project', 'group']
+ ],
+ supports_check_mode=True
+ )
+ ensure_gitlab_package(module)
+
+ gitlab_project = module.params['project']
+ gitlab_group = module.params['group']
+ purge = module.params['purge']
+ label_list = module.params['labels']
+ state = module.params['state']
+
+ gitlab_version = gitlab.__version__
+ _min_gitlab = '3.2.0'
+ if LooseVersion(gitlab_version) < LooseVersion(_min_gitlab):
+ module.fail_json(msg="community.general.gitlab_label requires python-gitlab Python module >= %s "
+ "(installed version: [%s]). Please upgrade "
+ "python-gitlab to version %s or above." % (_min_gitlab, gitlab_version, _min_gitlab))
+
+ gitlab_instance = gitlab_authentication(module)
+
+    # find_project can return None, as long as the group exists
+ gitlab_project_id = find_project(gitlab_instance, gitlab_project)
+
+    # find_group can return None, as long as the project exists
+ gitlab_group_id = find_group(gitlab_instance, gitlab_group)
+
+    # if neither is found, the module must fail
+ if not gitlab_project_id and not gitlab_group_id:
+ if gitlab_project and not gitlab_project_id:
+ module.fail_json(msg="project '%s' not found." % gitlab_project)
+ if gitlab_group and not gitlab_group_id:
+ module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+ this_gitlab = GitlabLabels(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id,
+ project_id=gitlab_project_id)
+
+ if state == 'present':
+ _existing_labels = [x.asdict()['name'] for x in this_gitlab.list_all_labels()]
+
+ # color is mandatory when creating label, but it's optional when changing name or updating other fields
+ if any(x['color'] is None and x['new_name'] is None and x['name'] not in _existing_labels for x in label_list):
+ module.fail_json(msg='color parameter is required for new labels')
+
+ change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, label_list, state, module)
+
+ if not module.check_mode:
+ raw_return_value['untouched'] = [x for x in before if x in after]
+
+ added = [x.get('name') for x in raw_return_value['added']]
+ updated = [x.get('name') for x in raw_return_value['updated']]
+ removed = [x.get('name') for x in raw_return_value['removed']]
+ untouched = [x.get('name') for x in raw_return_value['untouched']]
+ return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+ module.exit_json(changed=change, labels=return_value, labels_obj=_obj)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_merge_request.py b/ansible_collections/community/general/plugins/modules/gitlab_merge_request.py
new file mode 100644
index 000000000..5bb9cb9c7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_merge_request.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com)
+# Based on code:
+# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be)
+# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_merge_request
+short_description: Create, update, or delete GitLab merge requests
+version_added: 7.1.0
+description:
+ - Creates a merge request if it does not exist.
+ - When a single merge request does exist, it will be updated if the provided parameters are different.
+ - When a single merge request does exist and O(state=absent), the merge request will be deleted.
+ - When multiple merge requests are detected, the task fails.
+ - Existing merge requests are matched based on O(title), O(source_branch), O(target_branch),
+ and O(state_filter) filters.
+author:
+ - zvaraondrej (@zvaraondrej)
+requirements:
+ - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete merge request.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ project:
+ description:
+ - The path or name of the project.
+ required: true
+ type: str
+ source_branch:
+ description:
+ - Merge request's source branch.
+      - Ignored when updating an existing merge request.
+ required: true
+ type: str
+ target_branch:
+ description:
+ - Merge request's target branch.
+ required: true
+ type: str
+ title:
+ description:
+ - A title for the merge request.
+ type: str
+ required: true
+ description:
+ description:
+ - A description for the merge request.
+      - Gets overridden by the content of the file specified in O(description_path), if found.
+ type: str
+ description_path:
+ description:
+      - A path to a file containing the merge request's description.
+      - Accepts Markdown formatted files.
+ type: path
+ labels:
+ description:
+      - Comma-separated list of label names.
+ type: str
+ default: ""
+ remove_source_branch:
+ description:
+ - Flag indicating if a merge request should remove the source branch when merging.
+ type: bool
+ default: false
+ state_filter:
+ description:
+ - Filter specifying state of merge requests while searching.
+ type: str
+ choices: ["opened", "closed", "locked", "merged"]
+ default: opened
+ assignee_ids:
+ description:
+      - Comma-separated list of assignee usernames, omitting the V(@) character.
+ - Set to empty string to unassign all assignees.
+ type: str
+ reviewer_ids:
+ description:
+      - Comma-separated list of reviewer usernames, omitting the V(@) character.
+ - Set to empty string to unassign all reviewers.
+ type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create Merge Request from branch1 to branch2
+ community.general.gitlab_merge_request:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ source_branch: branch1
+ target_branch: branch2
+ title: "Ansible demo MR"
+ description: "Demo MR description"
+ labels: "Ansible,Demo"
+ state_filter: "opened"
+    remove_source_branch: true
+ state: present
+
+- name: Delete Merge Request from branch1 to branch2
+ community.general.gitlab_merge_request:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ source_branch: branch1
+ target_branch: branch2
+ title: "Ansible demo MR"
+ state_filter: "opened"
+ state: absent
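+
+# Illustrative sketch (not part of the original examples): assignees and reviewers
+# are passed as comma-separated username strings without the leading @;
+# 'user1', 'user2', and 'reviewer1' are placeholder usernames.
+- name: Create Merge Request with assignees and reviewers
+  community.general.gitlab_merge_request:
+    api_url: https://gitlab.com
+    api_token: secret_access_token
+    project: "group1/project1"
+    source_branch: branch1
+    target_branch: branch2
+    title: "Ansible demo MR"
+    assignee_ids: "user1,user2"
+    reviewer_ids: "reviewer1"
+    state: present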
+'''
+
+RETURN = r'''
+msg:
+ description: Success or failure message.
+ returned: always
+ type: str
+ sample: "Success"
+
+mr:
+ description: API object.
+ returned: success
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.common.text.converters import to_native, to_text
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab, find_project
+)
+
+
+class GitlabMergeRequest(object):
+
+ def __init__(self, module, project, gitlab_instance):
+ self._gitlab = gitlab_instance
+ self._module = module
+ self.project = project
+
+ '''
+ @param branch Name of the branch
+ '''
+ def get_branch(self, branch):
+ try:
+ return self.project.branches.get(branch)
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to get the branch: %s" % to_native(e))
+
+ '''
+ @param title Title of the Merge Request
+ @param source_branch Merge Request's source branch
+ @param target_branch Merge Request's target branch
+ @param state_filter Merge Request's state to filter on
+ '''
+ def get_mr(self, title, source_branch, target_branch, state_filter):
+ mrs = []
+ try:
+ mrs = self.project.mergerequests.list(search=title, source_branch=source_branch, target_branch=target_branch, state=state_filter)
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to list the Merge Request: %s" % to_native(e))
+
+ if len(mrs) > 1:
+ self._module.fail_json(msg="Multiple Merge Requests matched search criteria.")
+ if len(mrs) == 1:
+ try:
+ return self.project.mergerequests.get(id=mrs[0].iid)
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to get the Merge Request: %s" % to_native(e))
+
+ '''
+ @param username Name of the user
+ '''
+ def get_user(self, username):
+ users = []
+ try:
+ users = [user for user in self.project.users.list(username=username, all=True) if user.username == username]
+ except gitlab.exceptions.GitlabGetError as e:
+ self._module.fail_json(msg="Failed to list the users: %s" % to_native(e))
+
+ if len(users) > 1:
+ self._module.fail_json(msg="Multiple Users matched search criteria.")
+ elif len(users) < 1:
+ self._module.fail_json(msg="No User matched search criteria.")
+ else:
+ return users[0]
+
+ '''
+ @param users List of usernames
+ '''
+ def get_user_ids(self, users):
+ return [self.get_user(user).id for user in users]
+
+ '''
+ @param options Options of the Merge Request
+ '''
+ def create_mr(self, options):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created the Merge Request %s" % options["title"])
+
+ try:
+ return self.project.mergerequests.create(options)
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to create Merge Request: %s " % to_native(e))
+
+ '''
+ @param mr Merge Request object to delete
+ '''
+ def delete_mr(self, mr):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully deleted the Merge Request %s" % mr["title"])
+
+ try:
+ return mr.delete()
+ except gitlab.exceptions.GitlabDeleteError as e:
+ self._module.fail_json(msg="Failed to delete Merge Request: %s " % to_native(e))
+
+ '''
+ @param mr Merge Request object to update
+ '''
+ def update_mr(self, mr, options):
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully updated the Merge Request %s" % mr["title"])
+
+ try:
+ return self.project.mergerequests.update(mr.iid, options)
+ except gitlab.exceptions.GitlabUpdateError as e:
+ self._module.fail_json(msg="Failed to update Merge Request: %s " % to_native(e))
+
+ '''
+ @param mr Merge Request object to evaluate
+ @param options New options to update MR with
+ '''
+ def mr_has_changed(self, mr, options):
+ for key, value in options.items():
+ if value is not None:
+ # see https://gitlab.com/gitlab-org/gitlab-foss/-/issues/27355
+ if key == 'remove_source_branch':
+ key = 'force_remove_source_branch'
+
+ if key == 'assignee_ids':
+ if options[key] != sorted([user["id"] for user in getattr(mr, 'assignees')]):
+ return True
+
+ elif key == 'reviewer_ids':
+ if options[key] != sorted([user["id"] for user in getattr(mr, 'reviewers')]):
+ return True
+
+ elif key == 'labels':
+ if options[key] != sorted(getattr(mr, key)):
+ return True
+
+ elif getattr(mr, key) != value:
+ return True
+
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ project=dict(type='str', required=True),
+ source_branch=dict(type='str', required=True),
+ target_branch=dict(type='str', required=True),
+ title=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ labels=dict(type='str', default="", required=False),
+ description_path=dict(type='path', required=False),
+ remove_source_branch=dict(type='bool', default=False, required=False),
+ state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]),
+ assignee_ids=dict(type='str', required=False),
+ reviewer_ids=dict(type='str', required=False),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['description', 'description_path'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ required_if=[
+ ['state', 'present', ['source_branch', 'target_branch', 'title'], True],
+ ['state', 'absent', ['source_branch', 'target_branch', 'title'], True],
+ ],
+ supports_check_mode=True
+ )
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
+
+ project = module.params['project']
+ source_branch = module.params['source_branch']
+ target_branch = module.params['target_branch']
+ title = module.params['title']
+ description = module.params['description']
+ labels = module.params['labels']
+ description_path = module.params['description_path']
+ remove_source_branch = module.params['remove_source_branch']
+ state_filter = module.params['state_filter']
+ assignee_ids = module.params['assignee_ids']
+ reviewer_ids = module.params['reviewer_ids']
+ state = module.params['state']
+
+ gitlab_version = gitlab.__version__
+ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
+ module.fail_json(msg="community.general.gitlab_merge_request requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
+ " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
+
+ this_project = find_project(gitlab_instance, project)
+ if this_project is None:
+ module.fail_json(msg="Failed to get the project: %s" % project)
+
+ this_gitlab = GitlabMergeRequest(module=module, project=this_project, gitlab_instance=gitlab_instance)
+
+ r_source_branch = this_gitlab.get_branch(source_branch)
+ if not r_source_branch:
+ module.fail_json(msg="Source branch {b} not exist.".format(b=r_source_branch))
+
+ r_target_branch = this_gitlab.get_branch(target_branch)
+ if not r_target_branch:
+ module.fail_json(msg="Destination branch {b} not exist.".format(b=r_target_branch))
+
+ this_mr = this_gitlab.get_mr(title, source_branch, target_branch, state_filter)
+
+ if state == "present":
+ if description_path:
+ try:
+ with open(description_path, 'rb') as f:
+ description = to_text(f.read(), errors='surrogate_or_strict')
+ except IOError as e:
+ module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e))
+
+ # sorting necessary in order to properly detect changes, as we don't want to get false positive
+ # results due to differences in ids ordering; see `mr_has_changed()`
+ assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids.split(","))) if assignee_ids else []
+ reviewer_ids = sorted(this_gitlab.get_user_ids(reviewer_ids.split(","))) if reviewer_ids else []
+ labels = sorted(labels.split(",")) if labels else []
+
+ options = {
+ "target_branch": target_branch,
+ "title": title,
+ "description": description,
+ "labels": labels,
+ "remove_source_branch": remove_source_branch,
+ "reviewer_ids": reviewer_ids,
+ "assignee_ids": assignee_ids,
+ }
+
+ if not this_mr:
+ options["source_branch"] = source_branch
+
+ mr = this_gitlab.create_mr(options)
+ module.exit_json(
+ changed=True, msg="Created the Merge Request {t} from branch {s} to branch {d}.".format(t=title, d=target_branch, s=source_branch),
+ mr=mr.asdict()
+ )
+ else:
+ if this_gitlab.mr_has_changed(this_mr, options):
+ mr = this_gitlab.update_mr(this_mr, options)
+ module.exit_json(
+ changed=True, msg="Merge Request {t} from branch {s} to branch {d} updated.".format(t=title, d=target_branch, s=source_branch),
+ mr=mr
+ )
+ else:
+ module.exit_json(
+ changed=False, msg="Merge Request {t} from branch {s} to branch {d} already exist".format(t=title, d=target_branch, s=source_branch),
+ mr=this_mr.asdict()
+ )
+ elif this_mr and state == "absent":
+ mr = this_gitlab.delete_mr(this_mr)
+ module.exit_json(
+ changed=True, msg="Merge Request {t} from branch {s} to branch {d} deleted.".format(t=title, d=target_branch, s=source_branch),
+ mr=mr
+ )
+ else:
+ module.exit_json(changed=False, msg="No changes are needed.", mr=this_mr.asdict())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_milestone.py b/ansible_collections/community/general/plugins/modules/gitlab_milestone.py
new file mode 100644
index 000000000..0a616ea47
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_milestone.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_milestone
+short_description: Creates/updates/deletes GitLab Milestones belonging to project or group
+version_added: 8.3.0
+description:
+ - When a milestone does not exist, it will be created.
+ - When a milestone does exist, its value will be updated when the values are different.
+ - Milestones can be purged.
+author:
+ - "Gabriele Pongelli (@gpongelli)"
+requirements:
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete milestone.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ purge:
+ description:
+      - When set to V(true), delete all milestones which are not mentioned in the task.
+ default: false
+ type: bool
+ required: false
+ project:
+ description:
+ - The path and name of the project. Either this or O(group) is required.
+ required: false
+ type: str
+ group:
+ description:
+ - The path of the group. Either this or O(project) is required.
+ required: false
+ type: str
+ milestones:
+ description:
+      - A list of dictionaries that represents the GitLab project's or group's milestones.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ suboptions:
+ title:
+ description:
+ - The name of the milestone.
+ type: str
+ required: true
+ due_date:
+ description:
+ - Milestone due date in YYYY-MM-DD format.
+ type: str
+ required: false
+ default: null
+ start_date:
+ description:
+ - Milestone start date in YYYY-MM-DD format.
+ type: str
+ required: false
+ default: null
+ description:
+ description:
+ - Milestone's description.
+ type: str
+ default: null
+'''
+
+
+EXAMPLES = '''
+# the same tasks can be executed for a group instead of a project
+- name: Create one milestone
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ milestones:
+ - title: milestone_one
+ start_date: "2024-01-04"
+ state: present
+
+- name: Create many group milestones
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: "group1"
+ milestones:
+ - title: milestone_one
+ start_date: "2024-01-04"
+ description: this is a milestone
+ due_date: "2024-02-04"
+ - title: milestone_two
+ state: present
+
+- name: Create many project milestones
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ milestones:
+ - title: milestone_one
+ start_date: "2024-01-04"
+ description: this is a milestone
+ due_date: "2024-02-04"
+ - title: milestone_two
+ state: present
+
+- name: Set or update some milestones
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ milestones:
+ - title: milestone_one
+ start_date: "2024-05-04"
+ state: present
+
+- name: Add milestone in check mode
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ milestones:
+ - title: milestone_one
+ start_date: "2024-05-04"
+ check_mode: true
+
+- name: Delete milestone
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ milestones:
+ - title: milestone_one
+ state: absent
+
+- name: Purge all milestones
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ purge: true
+
+- name: Delete many milestones
+ community.general.gitlab_milestone:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ state: absent
+ milestones:
+ - title: milestone-abc123
+ - title: milestone-two
+'''
+
+RETURN = '''
+milestones:
+  description: Four lists of the milestones which were added, updated, removed, or left untouched.
+ returned: success
+ type: dict
+ contains:
+ added:
+ description: A list of milestones which were created.
+ returned: always
+ type: list
+ sample: ['abcd', 'milestone-one']
+ untouched:
+ description: A list of milestones which exist.
+ returned: always
+ type: list
+ sample: ['defg', 'new-milestone']
+ removed:
+ description: A list of milestones which were deleted.
+ returned: always
+ type: list
+ sample: ['defg', 'new-milestone']
+ updated:
+      description: A list of pre-existing milestones whose values have been updated.
+ returned: always
+ type: list
+ sample: ['defg', 'new-milestone']
+milestones_obj:
+ description: API object.
+ returned: success
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project, gitlab
+)
+from datetime import datetime
+
+
+class GitlabMilestones(object):
+
+ def __init__(self, module, gitlab_instance, group_id, project_id):
+ self._gitlab = gitlab_instance
+ self.gitlab_object = group_id if group_id else project_id
+ self.is_group_milestone = True if group_id else False
+ self._module = module
+
+ def list_all_milestones(self):
+ page_nb = 1
+ milestones = []
+ vars_page = self.gitlab_object.milestones.list(page=page_nb)
+ while len(vars_page) > 0:
+ milestones += vars_page
+ page_nb += 1
+ vars_page = self.gitlab_object.milestones.list(page=page_nb)
+ return milestones
+
+ def create_milestone(self, var_obj):
+ if self._module.check_mode:
+ return True, True
+
+ var = {
+ "title": var_obj.get('title'),
+ }
+
+ if var_obj.get('description') is not None:
+ var["description"] = var_obj.get('description')
+
+ if var_obj.get('start_date') is not None:
+ var["start_date"] = self.check_date(var_obj.get('start_date'))
+
+ if var_obj.get('due_date') is not None:
+ var["due_date"] = self.check_date(var_obj.get('due_date'))
+
+ _obj = self.gitlab_object.milestones.create(var)
+ return True, _obj.asdict()
+
+ def update_milestone(self, var_obj):
+ if self._module.check_mode:
+ return True, True
+ _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title')))
+
+ if var_obj.get('description') is not None:
+ _milestone.description = var_obj.get('description')
+
+ if var_obj.get('start_date') is not None:
+ _milestone.start_date = var_obj.get('start_date')
+
+ if var_obj.get('due_date') is not None:
+ _milestone.due_date = var_obj.get('due_date')
+
+ # save returns None
+ _milestone.save()
+ return True, _milestone.asdict()
+
+ def get_milestone_id(self, _title):
+ _milestone_list = self.gitlab_object.milestones.list()
+ _found = list(filter(lambda x: x.title == _title, _milestone_list))
+ if _found:
+ return _found[0].id
+ else:
+ self._module.fail_json(msg="milestone '%s' not found." % _title)
+
+ def check_date(self, _date):
+ try:
+ datetime.strptime(_date, '%Y-%m-%d')
+ except ValueError:
+ self._module.fail_json(msg="milestone's date '%s' not in correct format." % _date)
+ return _date
+
+ def delete_milestone(self, var_obj):
+ if self._module.check_mode:
+ return True, True
+ _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title')))
+ # delete returns None
+ _milestone.delete()
+ return True, _milestone.asdict()
+
+
+def compare(requested_milestones, existing_milestones, state):
+ # This prediction mirrors the behaviour of a previous (somewhat buggy) version.
+ # It is not strictly necessary and may introduce its own inaccuracies,
+ # but it is kept because it is only used to predict results in check mode.
+ # The logic models state 'present' without purge; all other cases can be derived from it:
+ # untouched => entry is equal in both lists
+ # updated => only the titles are equal
+ # added => the title does not exist yet
+ untouched = list()
+ updated = list()
+ added = list()
+
+ if state == 'present':
+ _existing_milestones = list()
+ for item in existing_milestones:
+ _existing_milestones.append({'title': item.get('title')})
+
+ for var in requested_milestones:
+ if var in existing_milestones:
+ untouched.append(var)
+ else:
+ compare_item = {'title': var.get('title')}
+ if compare_item in _existing_milestones:
+ updated.append(var)
+ else:
+ added.append(var)
+
+ return untouched, updated, added
+
+
+def native_python_main(this_gitlab, purge, requested_milestones, state, module):
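+ # core logic: work out which milestones need to be added, updated or removed and apply the changes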
+ change = False
+ return_value = dict(added=[], updated=[], removed=[], untouched=[])
+ return_obj = dict(added=[], updated=[], removed=[])
+
+ milestones_before = [x.asdict() for x in this_gitlab.list_all_milestones()]
+
+ # filter out and enrich before compare
+ for item in requested_milestones:
+ # add defaults when not present
+ if item.get('description') is None:
+ item['description'] = ""
+ if item.get('due_date') is None:
+ item['due_date'] = None
+ if item.get('start_date') is None:
+ item['start_date'] = None
+
+ for item in milestones_before:
+ # drop fields that only exist on the server side so requested and existing milestones compare cleanly
+ item.pop('id')
+ item.pop('iid')
+ item.pop('created_at')
+ item.pop('expired')
+ item.pop('state')
+ item.pop('updated_at')
+ item.pop('web_url')
+ # group milestone has group_id, while project has project_id
+ if 'group_id' in item:
+ item.pop('group_id')
+ if 'project_id' in item:
+ item.pop('project_id')
+
+ if state == 'present':
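+ # anything requested that does not exactly match an existing milestone either has to be created or updated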
+ add_or_update = [x for x in requested_milestones if x not in milestones_before]
+ for item in add_or_update:
+ try:
+ _rv, _obj = this_gitlab.create_milestone(item)
+ if _rv:
+ return_value['added'].append(item)
+ return_obj['added'].append(_obj)
+ except Exception:
+ # create() raises an exception when the milestone already exists; fall back to updating it
+ _rv, _obj = this_gitlab.update_milestone(item)
+ if _rv:
+ return_value['updated'].append(item)
+ return_obj['updated'].append(_obj)
+
+ if purge:
+ # re-fetch
+ _milestones = this_gitlab.list_all_milestones()
+
+ for item in milestones_before:
+ _rv, _obj = this_gitlab.delete_milestone(item)
+ if _rv:
+ return_value['removed'].append(item)
+ return_obj['removed'].append(_obj)
+
+ elif state == 'absent':
+ if not purge:
+ _milestone_titles_requested = [x['title'] for x in requested_milestones]
+ remove_requested = [x for x in milestones_before if x['title'] in _milestone_titles_requested]
+ for item in remove_requested:
+ _rv, _obj = this_gitlab.delete_milestone(item)
+ if _rv:
+ return_value['removed'].append(item)
+ return_obj['removed'].append(_obj)
+ else:
+ for item in milestones_before:
+ _rv, _obj = this_gitlab.delete_milestone(item)
+ if _rv:
+ return_value['removed'].append(item)
+ return_obj['removed'].append(_obj)
+
+ if module.check_mode:
+ _untouched, _updated, _added = compare(requested_milestones, milestones_before, state)
+ return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched)
+
+ if any(return_value[x] for x in ['added', 'removed', 'updated']):
+ change = True
+
+ milestones_after = [x.asdict() for x in this_gitlab.list_all_milestones()]
+
+ return change, return_value, milestones_before, milestones_after, return_obj
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ project=dict(type='str', required=False, default=None),
+ group=dict(type='str', required=False, default=None),
+ purge=dict(type='bool', required=False, default=False),
+ milestones=dict(type='list', elements='dict', required=False, default=list(),
+ options=dict(
+ title=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ due_date=dict(type='str', required=False),
+ start_date=dict(type='str', required=False),)
+ ),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['project', 'group'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'],
+ ['project', 'group']
+ ],
+ supports_check_mode=True
+ )
+ ensure_gitlab_package(module)
+
+ gitlab_project = module.params['project']
+ gitlab_group = module.params['group']
+ purge = module.params['purge']
+ milestone_list = module.params['milestones']
+ state = module.params['state']
+
+ gitlab_version = gitlab.__version__
+ _min_gitlab = '3.2.0'
+ if LooseVersion(gitlab_version) < LooseVersion(_min_gitlab):
+ module.fail_json(msg="community.general.gitlab_milestone requires python-gitlab Python module >= %s "
+ "(installed version: [%s]). Please upgrade "
+ "python-gitlab to version %s or above." % (_min_gitlab, gitlab_version, _min_gitlab))
+
+ gitlab_instance = gitlab_authentication(module)
+
+ # find_project may return None; in that case the group must resolve instead
+ gitlab_project_id = find_project(gitlab_instance, gitlab_project)
+
+ # find_group may return None; in that case the project must resolve instead
+ gitlab_group_id = find_group(gitlab_instance, gitlab_group)
+
+ # if neither a project nor a group could be resolved, fail with a specific message
+ if not gitlab_project_id and not gitlab_group_id:
+ if gitlab_project and not gitlab_project_id:
+ module.fail_json(msg="project '%s' not found." % gitlab_project)
+ if gitlab_group and not gitlab_group_id:
+ module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+ this_gitlab = GitlabMilestones(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id,
+ project_id=gitlab_project_id)
+
+ change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, milestone_list, state,
+ module)
+
+ if not module.check_mode:
+ raw_return_value['untouched'] = [x for x in before if x in after]
+
+ added = [x.get('title') for x in raw_return_value['added']]
+ updated = [x.get('title') for x in raw_return_value['updated']]
+ removed = [x.get('title') for x in raw_return_value['removed']]
+ untouched = [x.get('title') for x in raw_return_value['untouched']]
+ return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+ module.exit_json(changed=change, milestones=return_value, milestones_obj=_obj)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project.py b/ansible_collections/community/general/plugins/modules/gitlab_project.py
index db360d578..f1b96bfac 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_project.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project.py
@@ -15,13 +15,12 @@ module: gitlab_project
short_description: Creates/updates/deletes GitLab Projects
description:
- When the project does not exist in GitLab, it will be created.
- - When the project does exists and I(state=absent), the project will be deleted.
+ - When the project does exist and O(state=absent), the project will be deleted.
- When changes are made to the project, the project will be updated.
author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
requirements:
- - python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- community.general.auth_basic
@@ -84,9 +83,9 @@ options:
default: true
visibility:
description:
- - C(private) Project access must be granted explicitly for each user.
- - C(internal) The project can be cloned by any logged in user.
- - C(public) The project can be cloned without any authentication.
+ - V(private) Project access must be granted explicitly for each user.
+ - V(internal) The project can be cloned by any logged in user.
+ - V(public) The project can be cloned without any authentication.
default: private
type: str
choices: ["private", "internal", "public"]
@@ -108,7 +107,7 @@ options:
merge_method:
description:
- What requirements are placed upon merges.
- - Possible values are C(merge), C(rebase_merge) merge commit with semi-linear history, C(ff) fast-forward merges only.
+ - Possible values are V(merge), V(rebase_merge) (merge commit with semi-linear history), and V(ff) (fast-forward merges only).
type: str
choices: ["ff", "merge", "rebase_merge"]
default: merge
@@ -175,79 +174,81 @@ options:
version_added: "4.2.0"
default_branch:
description:
- - Default branch name for a new project.
- - This option is only used on creation, not for updates. This is also only used if I(initialize_with_readme=true).
+ - The default branch name for this project.
+ - For project creation, this option requires O(initialize_with_readme=true).
+ - For project update, the branch must exist.
+ - Updating the project's default branch is supported since community.general 8.0.0.
type: str
version_added: "4.2.0"
builds_access_level:
description:
- - C(private) means that repository CI/CD is allowed only to project members.
- - C(disabled) means that repository CI/CD is disabled.
- - C(enabled) means that repository CI/CD is enabled.
+ - V(private) means that repository CI/CD is allowed only to project members.
+ - V(disabled) means that repository CI/CD is disabled.
+ - V(enabled) means that repository CI/CD is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.2.0"
forking_access_level:
description:
- - C(private) means that repository forks is allowed only to project members.
- - C(disabled) means that repository forks are disabled.
- - C(enabled) means that repository forks are enabled.
+ - V(private) means that repository forks are allowed only to project members.
+ - V(disabled) means that repository forks are disabled.
+ - V(enabled) means that repository forks are enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.2.0"
container_registry_access_level:
description:
- - C(private) means that container registry is allowed only to project members.
- - C(disabled) means that container registry is disabled.
- - C(enabled) means that container registry is enabled.
+ - V(private) means that container registry is allowed only to project members.
+ - V(disabled) means that container registry is disabled.
+ - V(enabled) means that container registry is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.2.0"
releases_access_level:
description:
- - C(private) means that accessing release is allowed only to project members.
- - C(disabled) means that accessing release is disabled.
- - C(enabled) means that accessing release is enabled.
+ - V(private) means that accessing release is allowed only to project members.
+ - V(disabled) means that accessing release is disabled.
+ - V(enabled) means that accessing release is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.4.0"
environments_access_level:
description:
- - C(private) means that deployment to environment is allowed only to project members.
- - C(disabled) means that deployment to environment is disabled.
- - C(enabled) means that deployment to environment is enabled.
+ - V(private) means that deployment to environment is allowed only to project members.
+ - V(disabled) means that deployment to environment is disabled.
+ - V(enabled) means that deployment to environment is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.4.0"
feature_flags_access_level:
description:
- - C(private) means that feature rollout is allowed only to project members.
- - C(disabled) means that feature rollout is disabled.
- - C(enabled) means that feature rollout is enabled.
+ - V(private) means that feature rollout is allowed only to project members.
+ - V(disabled) means that feature rollout is disabled.
+ - V(enabled) means that feature rollout is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.4.0"
infrastructure_access_level:
description:
- - C(private) means that configuring infrastructure is allowed only to project members.
- - C(disabled) means that configuring infrastructure is disabled.
- - C(enabled) means that configuring infrastructure is enabled.
+ - V(private) means that configuring infrastructure is allowed only to project members.
+ - V(disabled) means that configuring infrastructure is disabled.
+ - V(enabled) means that configuring infrastructure is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.4.0"
monitor_access_level:
description:
- - C(private) means that monitoring health is allowed only to project members.
- - C(disabled) means that monitoring health is disabled.
- - C(enabled) means that monitoring health is enabled.
+ - V(private) means that monitoring health is allowed only to project members.
+ - V(disabled) means that monitoring health is disabled.
+ - V(enabled) means that monitoring health is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.4.0"
security_and_compliance_access_level:
description:
- - C(private) means that accessing security and complicance tab is allowed only to project members.
- - C(disabled) means that accessing security and complicance tab is disabled.
- - C(enabled) means that accessing security and complicance tab is enabled.
+ - V(private) means that accessing the security and compliance tab is allowed only to project members.
+ - V(disabled) means that accessing the security and compliance tab is disabled.
+ - V(enabled) means that accessing the security and compliance tab is enabled.
type: str
choices: ["private", "disabled", "enabled"]
version_added: "6.4.0"
@@ -272,7 +273,6 @@ EXAMPLES = r'''
community.general.gitlab_project:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
- validate_certs: false
name: my_first_project
state: absent
delegate_to: localhost
@@ -338,7 +338,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab
)
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
@@ -355,7 +355,7 @@ class GitLabProject(object):
@param namespace Namespace Object (User or Group)
@param options Options of the project
'''
- def create_or_update_project(self, project_name, namespace, options):
+ def create_or_update_project(self, module, project_name, namespace, options):
changed = False
project_options = {
'name': project_name,
@@ -395,6 +395,8 @@ class GitLabProject(object):
# Because we have already call userExists in main()
if self.project_object is None:
+ if options['default_branch'] and not options['initialize_with_readme']:
+ module.fail_json(msg="Param default_branch need param initialize_with_readme set to true")
project_options.update({
'path': options['path'],
'import_url': options['import_url'],
@@ -416,6 +418,8 @@ class GitLabProject(object):
changed = True
else:
+ if options['default_branch']:
+ project_options['default_branch'] = options['default_branch']
changed, project = self.update_project(self.project_object, project_options)
self.project_object = project
@@ -552,7 +556,9 @@ def main():
],
supports_check_mode=True,
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
group_identifier = module.params['group']
project_name = module.params['name']
@@ -590,11 +596,6 @@ def main():
security_and_compliance_access_level = module.params['security_and_compliance_access_level']
topics = module.params['topics']
- if default_branch and not initialize_with_readme:
- module.fail_json(msg="Param default_branch need param initialize_with_readme set to true")
-
- gitlab_instance = gitlab_authentication(module)
-
# Set project_path to project_name if it is empty.
if project_path is None:
project_path = project_name.replace(" ", "_")
@@ -636,7 +637,7 @@ def main():
if state == 'present':
- if gitlab_project.create_or_update_project(project_name, namespace, {
+ if gitlab_project.create_or_update_project(module, project_name, namespace, {
"path": project_path,
"description": project_description,
"initialize_with_readme": initialize_with_readme,
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_access_token.py b/ansible_collections/community/general/plugins/modules/gitlab_project_access_token.py
new file mode 100644
index 000000000..e692a3057
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_access_token.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr)
+# Based on code:
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Copyright (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_project_access_token
+short_description: Manages GitLab project access tokens
+version_added: 8.4.0
+description:
+ - Creates and revokes project access tokens.
+author:
+ - Zoran Krleza (@pixslx)
+requirements:
+ - python-gitlab >= 3.1.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+notes:
+ - Access tokens cannot be changed. If a parameter needs to be changed, an access token has to be recreated.
+ Whether tokens will be recreated is controlled by the O(recreate) option, which defaults to V(never).
+ - The token string is contained in the result only when the access token is created or recreated. It cannot be fetched afterwards.
+ - Token matching is done by comparing the O(name) option.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project:
+ description:
+ - ID or full path of project in the form of group/name.
+ required: true
+ type: str
+ name:
+ description:
+ - Access token's name.
+ required: true
+ type: str
+ scopes:
+ description:
+ - Scope of the access token.
+ required: true
+ type: list
+ elements: str
+ aliases: ["scope"]
+ choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner", "ai_features", "k8s_proxy"]
+ access_level:
+ description:
+ - Access level of the access token.
+ type: str
+ default: maintainer
+ choices: ["guest", "reporter", "developer", "maintainer", "owner"]
+ expires_at:
+ description:
+ - Expiration date of the access token in C(YYYY-MM-DD) format.
+ - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date.
+ type: str
+ required: true
+ recreate:
+ description:
+ - Whether the access token will be recreated if it already exists.
+ - When V(never) the token will never be recreated.
+ - When V(always) the token will always be recreated.
+ - When V(state_change) the token will be recreated if there is a difference between desired state and actual state.
+ type: str
+ choices: ["never", "always", "state_change"]
+ default: never
+ state:
+ description:
+ - When V(present) the access token will be added to the project if it does not exist.
+ - When V(absent) it will be removed from the project if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = r'''
+- name: "Creating a project access token"
+ community.general.gitlab_project_access_token:
+ api_url: https://gitlab.example.com/
+ api_token: "somegitlabapitoken"
+ project: "my_group/my_project"
+ name: "project_token"
+ expires_at: "2024-12-31"
+ access_level: developer
+ scopes:
+ - api
+ - read_api
+ - read_repository
+ - write_repository
+ state: present
+
+- name: "Revoking a project access token"
+ community.general.gitlab_project_access_token:
+ api_url: https://gitlab.example.com/
+ api_token: "somegitlabapitoken"
+ project: "my_group/my_project"
+ name: "project_token"
+ expires_at: "2024-12-31"
+ scopes:
+ - api
+ - read_api
+ - read_repository
+ - write_repository
+ state: absent
+
+- name: "Change (recreate) existing token if its actual state is different than desired state"
+ community.general.gitlab_project_access_token:
+ api_url: https://gitlab.example.com/
+ api_token: "somegitlabapitoken"
+ project: "my_group/my_project"
+ name: "project_token"
+ expires_at: "2024-12-31"
+ scopes:
+ - api
+ - read_api
+ - read_repository
+ - write_repository
+ recreate: state_change
+ state: present
+'''
+
+RETURN = r'''
+access_token:
+ description:
+ - API object.
+ - Only contains the value of the token if the token was created or recreated.
+ returned: success and O(state=present)
+ type: dict
+'''
+
+from datetime import datetime
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, find_project, gitlab_authentication, gitlab
+)
+
+ACCESS_LEVELS = dict(guest=10, reporter=20, developer=30, maintainer=40, owner=50)
+
+
+class GitLabProjectAccessToken(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.access_token_object = None
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the access_token
+ '''
+ def create_access_token(self, project, arguments):
+ changed = False
+ if self._module.check_mode:
+ return True
+
+ try:
+ self.access_token_object = project.access_tokens.create(arguments)
+ changed = True
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create access token: %s " % to_native(e))
+
+ return changed
+
+ '''
+ @param project Project object
+ @param name of the access token
+ '''
+ def find_access_token(self, project, name):
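+ # store the matching token on self.access_token_object; the boolean return value is not used by callers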
+ access_tokens = project.access_tokens.list(all=True)
+ for access_token in access_tokens:
+ if (access_token.name == name):
+ self.access_token_object = access_token
+ return False
+ return False
+
+ def revoke_access_token(self):
+ if self._module.check_mode:
+ return True
+
+ changed = False
+ try:
+ self.access_token_object.delete()
+ changed = True
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e))
+
+ return changed
+
+ def access_tokens_equal(self):
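+ # compare name, scopes, access level and expiry; the token secret itself can never be read back and is not compared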
+ if self.access_token_object.name != self._module.params['name']:
+ return False
+ if self.access_token_object.scopes != self._module.params['scopes']:
+ return False
+ if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]:
+ return False
+ if self.access_token_object.expires_at != self._module.params['expires_at']:
+ return False
+ return True
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ scopes=dict(type='list',
+ required=True,
+ aliases=['scope'],
+ elements='str',
+ choices=['api',
+ 'read_api',
+ 'read_registry',
+ 'write_registry',
+ 'read_repository',
+ 'write_repository',
+ 'create_runner',
+ 'ai_features',
+ 'k8s_proxy']),
+ access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ expires_at=dict(type='str', required=True),
+ recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change'])
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ name = module.params['name']
+ scopes = module.params['scopes']
+ access_level_str = module.params['access_level']
+ expires_at = module.params['expires_at']
+ recreate = module.params['recreate']
+
+ access_level = ACCESS_LEVELS[access_level_str]
+
+ try:
+ datetime.strptime(expires_at, '%Y-%m-%d')
+ except ValueError:
+ module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD")
+
+ gitlab_instance = gitlab_authentication(module)
+
+ gitlab_access_token = GitLabProjectAccessToken(module, gitlab_instance)
+
+ project = find_project(gitlab_instance, project_identifier)
+ if project is None:
+ module.fail_json(msg="Failed to create access token: project %s does not exists" % project_identifier)
+
+ gitlab_access_token_exists = False
+ gitlab_access_token.find_access_token(project, name)
+ if gitlab_access_token.access_token_object is not None:
+ gitlab_access_token_exists = True
+
+ if state == 'absent':
+ if gitlab_access_token_exists:
+ gitlab_access_token.revoke_access_token()
+ module.exit_json(changed=True, msg="Successfully deleted access token %s" % name)
+ else:
+ module.exit_json(changed=False, msg="Access token does not exists")
+
+ if state == 'present':
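+ # existing tokens cannot be updated in place; they are only replaced according to the recreate strategy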
+ if gitlab_access_token_exists:
+ if gitlab_access_token.access_tokens_equal():
+ if recreate == 'always':
+ gitlab_access_token.revoke_access_token()
+ gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+ module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs)
+ else:
+ module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs)
+ else:
+ if recreate == 'never':
+ module.fail_json(msg="Access token already exists and its state is different. It can not be updated without recreating.")
+ else:
+ gitlab_access_token.revoke_access_token()
+ gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+ module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs)
+ else:
+ gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+ module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py b/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py
index 5b1a8d3f1..fee938949 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py
@@ -39,8 +39,8 @@ options:
state:
description:
- State of the badge in the project.
- - On C(present), it adds a badge to a GitLab project.
- - On C(absent), it removes a badge from a GitLab project.
+ - On V(present), it adds a badge to a GitLab project.
+ - On V(absent), it removes a badge from a GitLab project.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -82,7 +82,7 @@ EXAMPLES = r'''
RETURN = '''
badge:
description: The badge information.
- returned: when I(state=present)
+ returned: when O(state=present)
type: dict
sample:
id: 1
@@ -97,7 +97,7 @@ from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, find_project, ensure_gitlab_package
+ auth_argument_spec, gitlab_authentication, find_project, list_all_kwargs
)
@@ -105,7 +105,7 @@ def present_strategy(module, gl, project, wished_badge):
changed = False
existing_badge = None
- for badge in project.badges.list(iterator=True):
+ for badge in project.badges.list(**list_all_kwargs):
if badge.image_url == wished_badge["image_url"]:
existing_badge = badge
break
@@ -135,7 +135,7 @@ def absent_strategy(module, gl, project, wished_badge):
changed = False
existing_badge = None
- for badge in project.badges.list(iterator=True):
+ for badge in project.badges.list(**list_all_kwargs):
if badge.image_url == wished_badge["image_url"]:
existing_badge = badge
break
@@ -159,13 +159,12 @@ state_strategy = {
def core(module):
- ensure_gitlab_package(module)
+ # check prerequisites and connect to gitlab server
+ gl = gitlab_authentication(module)
gitlab_project = module.params['project']
state = module.params['state']
- gl = gitlab_authentication(module)
-
project = find_project(gl, gitlab_project)
# project doesn't exist
if not project:
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_members.py b/ansible_collections/community/general/plugins/modules/gitlab_project_members.py
index 905358443..2ce277f68 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_project_members.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_members.py
@@ -42,21 +42,21 @@ options:
gitlab_user:
description:
- A username or a list of usernames to add to/remove from the GitLab project.
- - Mutually exclusive with I(gitlab_users_access).
+ - Mutually exclusive with O(gitlab_users_access).
type: list
elements: str
access_level:
description:
- The access level for the user.
- - Required if I(state=present), user state is set to present.
+ - Required if O(state=present), that is, when the user state is set to present.
type: str
choices: ['guest', 'reporter', 'developer', 'maintainer']
gitlab_users_access:
description:
- Provide a list of user to access level mappings.
- Every dictionary in this list specifies a user (by username) and the access level the user should have.
- - Mutually exclusive with I(gitlab_user) and I(access_level).
- - Use together with I(purge_users) to remove all users not specified here from the project.
+ - Mutually exclusive with O(gitlab_user) and O(access_level).
+ - Use together with O(purge_users) to remove all users not specified here from the project.
type: list
elements: dict
suboptions:
@@ -67,7 +67,7 @@ options:
access_level:
description:
- The access level for the user.
- - Required if I(state=present), user state is set to present.
+ - Required if O(state=present), that is, when the user state is set to present.
type: str
choices: ['guest', 'reporter', 'developer', 'maintainer']
required: true
@@ -75,16 +75,16 @@ options:
state:
description:
- State of the member in the project.
- - On C(present), it adds a user to a GitLab project.
- - On C(absent), it removes a user from a GitLab project.
+ - On V(present), it adds a user to a GitLab project.
+ - On V(absent), it removes a user from a GitLab project.
choices: ['present', 'absent']
default: 'present'
type: str
purge_users:
description:
- - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list.
+ - Adds/removes users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list.
If omitted do not purge orphaned members.
- - Is only used when I(state=present).
+ - Is only used when O(state=present).
type: list
elements: str
choices: ['guest', 'reporter', 'developer', 'maintainer']
@@ -106,7 +106,6 @@ EXAMPLES = r'''
community.general.gitlab_project_members:
api_url: 'https://gitlab.example.com'
api_token: 'Your-Private-Token'
- validate_certs: false
project: projectname
gitlab_user: username
state: absent
@@ -163,7 +162,7 @@ from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, gitlab_authentication, gitlab
)
@@ -279,13 +278,15 @@ def main():
],
supports_check_mode=True,
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gl = gitlab_authentication(module)
access_level_int = {
- 'guest': gitlab.GUEST_ACCESS,
- 'reporter': gitlab.REPORTER_ACCESS,
- 'developer': gitlab.DEVELOPER_ACCESS,
- 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'guest': gitlab.const.GUEST_ACCESS,
+ 'reporter': gitlab.const.REPORTER_ACCESS,
+ 'developer': gitlab.const.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.const.MAINTAINER_ACCESS,
}
gitlab_project = module.params['project']
@@ -296,9 +297,6 @@ def main():
if purge_users:
purge_users = [access_level_int[level] for level in purge_users]
- # connect to gitlab server
- gl = gitlab_authentication(module)
-
project = GitLabProjectMembers(module, gl)
gitlab_project_id = project.get_project(gitlab_project)
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
index 63569dd78..329e7a414 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
@@ -14,11 +14,10 @@ description:
- When a project variable does not exist, it will be created.
- When a project variable does exist, its value will be updated when the values are different.
- Variables which are untouched in the playbook, but are not untouched in the GitLab project,
- they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+ they stay untouched (O(purge=false)) or will be deleted (O(purge=true)).
author:
- "Markus Bergholz (@markuman)"
requirements:
- - python >= 2.7
- python-gitlab python module
extends_documentation_fragment:
- community.general.auth_basic
@@ -51,16 +50,17 @@ options:
type: bool
vars:
description:
- - When the list element is a simple key-value pair, masked and protected will be set to false.
- - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
- have full control about whether a value should be masked, protected or both.
+ - When the list element is a simple key-value pair, masked, raw and protected will be set to false.
+ - When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can
+ have full control over whether a value should be masked, raw, protected, or any combination of these.
- Support for protected values requires GitLab >= 9.3.
- Support for masked values requires GitLab >= 11.10.
+ - Support for raw values requires GitLab >= 15.7.
- Support for environment_scope requires GitLab Premium >= 13.11.
- Support for variable_type requires GitLab >= 11.11.
- - A I(value) must be a string or a number.
- - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
- - Field I(environment_scope) must be a string defined by scope environment.
+ - A C(value) must be a string or a number.
+ - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file).
+ - Field C(environment_scope) must be a string defined by scope environment.
- When a value is masked, it must be in Base64 and have a length of at least 8 characters.
See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables).
default: {}
@@ -69,7 +69,7 @@ options:
version_added: 4.4.0
description:
- A list of dictionaries that represents CI/CD variables.
- - This module works internal with this structure, even if the older I(vars) parameter is used.
+ - This module works internally with this structure, even if the older O(vars) parameter is used.
default: []
type: list
elements: dict
@@ -82,31 +82,38 @@ options:
value:
description:
- The variable value.
- - Required when I(state=present).
+ - Required when O(state=present).
type: str
masked:
description:
- - Wether variable value is masked or not.
+ - Whether variable value is masked or not.
- Support for masked values requires GitLab >= 11.10.
type: bool
default: false
protected:
description:
- - Wether variable value is protected or not.
+ - Whether variable value is protected or not.
- Support for protected values requires GitLab >= 9.3.
type: bool
default: false
+ raw:
+ description:
+ - Whether variable value is raw or not.
+ - Support for raw values requires GitLab >= 15.7.
+ type: bool
+ default: false
+ version_added: '7.4.0'
variable_type:
description:
- - Wether a variable is an environment variable (C(env_var)) or a file (C(file)).
- - Support for I(variable_type) requires GitLab >= 11.11.
+ - Whether a variable is an environment variable (V(env_var)) or a file (V(file)).
+ - Support for O(variables[].variable_type) requires GitLab >= 11.11.
type: str
choices: ["env_var", "file"]
default: env_var
environment_scope:
description:
- The scope for the variable.
- - Support for I(environment_scope) requires GitLab Premium >= 13.11.
+ - Support for O(variables[].environment_scope) requires GitLab Premium >= 13.11.
type: str
default: '*'
'''
@@ -143,6 +150,38 @@ EXAMPLES = '''
variable_type: env_var
environment_scope: '*'
+- name: Set or update some CI/CD variables with raw value
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ raw: true
+ variable_type: env_var
+ environment_scope: '*'
+
+- name: Set or update some CI/CD variables with expandable value
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: '$MY_OTHER_VARIABLE'
+ masked: true
+ protected: true
+ raw: false
+ variable_type: env_var
+ environment_scope: '*'
+
- name: Delete one variable
community.general.gitlab_project_variable:
api_url: https://gitlab.com
@@ -181,62 +220,16 @@ project_variable:
sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
'''
-import traceback
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.api import basic_auth_argument_spec
-from ansible.module_utils.six import string_types
-from ansible.module_utils.six import integer_types
-GITLAB_IMP_ERR = None
-try:
- import gitlab # noqa: F401, pylint: disable=unused-import
- HAS_GITLAB_PACKAGE = True
-except Exception:
- GITLAB_IMP_ERR = traceback.format_exc()
- HAS_GITLAB_PACKAGE = False
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables
+ auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables,
+ list_all_kwargs
)
-def vars_to_variables(vars, module):
- # transform old vars to new variables structure
- variables = list()
- for item, value in vars.items():
- if (isinstance(value, string_types) or
- isinstance(value, (integer_types, float))):
- variables.append(
- {
- "name": item,
- "value": str(value),
- "masked": False,
- "protected": False,
- "variable_type": "env_var",
- }
- )
-
- elif isinstance(value, dict):
-
- new_item = {
- "name": item,
- "value": value.get('value'),
- "masked": value.get('masked'),
- "protected": value.get('protected'),
- "variable_type": value.get('variable_type'),
- }
-
- if value.get('environment_scope'):
- new_item['environment_scope'] = value.get('environment_scope')
-
- variables.append(new_item)
-
- else:
- module.fail_json(msg="value must be of type string, integer, float or dict")
-
- return variables
-
-
class GitlabProjectVariables(object):
def __init__(self, module, gitlab_instance):
@@ -248,14 +241,7 @@ class GitlabProjectVariables(object):
return self.repo.projects.get(project_name)
def list_all_project_variables(self):
- page_nb = 1
- variables = []
- vars_page = self.project.variables.list(page=page_nb)
- while len(vars_page) > 0:
- variables += vars_page
- page_nb += 1
- vars_page = self.project.variables.list(page=page_nb)
- return variables
+ return list(self.project.variables.list(**list_all_kwargs))
def create_variable(self, var_obj):
if self._module.check_mode:
@@ -266,6 +252,7 @@ class GitlabProjectVariables(object):
"value": var_obj.get('value'),
"masked": var_obj.get('masked'),
"protected": var_obj.get('protected'),
+ "raw": var_obj.get('raw'),
"variable_type": var_obj.get('variable_type'),
}
@@ -322,7 +309,7 @@ def compare(requested_variables, existing_variables, state):
def native_python_main(this_gitlab, purge, requested_variables, state, module):
change = False
- return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+ return_value = dict(added=[], updated=[], removed=[], untouched=[])
gitlab_keys = this_gitlab.list_all_project_variables()
before = [x.attributes for x in gitlab_keys]
@@ -336,6 +323,8 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module):
item['value'] = str(item.get('value'))
if item.get('protected') is None:
item['protected'] = False
+ if item.get('raw') is None:
+ item['raw'] = False
if item.get('masked') is None:
item['masked'] = False
if item.get('environment_scope') is None:
@@ -391,7 +380,7 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module):
if module.check_mode:
return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched)
- if return_value['added'] or return_value['removed'] or return_value['updated']:
+ if any(return_value[x] for x in ['added', 'removed', 'updated']):
change = True
gitlab_keys = this_gitlab.list_all_project_variables()
@@ -407,11 +396,14 @@ def main():
project=dict(type='str', required=True),
purge=dict(type='bool', required=False, default=False),
vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ # please mind: whenever changing the variables dict below, also update the KNOWN dict
+ # in module_utils/gitlab.py's filter_returned_variables, or things will break in unexpected ways
variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
protected=dict(type='bool', default=False),
+ raw=dict(type='bool', default=False),
environment_scope=dict(type='str', default='*'),
variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]),
)),
@@ -436,10 +428,9 @@ def main():
],
supports_check_mode=True
)
- ensure_gitlab_package(module)
- if not HAS_GITLAB_PACKAGE:
- module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
purge = module.params['purge']
var_list = module.params['vars']
@@ -452,9 +443,7 @@ def main():
if state == 'present':
if any(x['value'] is None for x in variables):
- module.fail_json(msg='value parameter is required in state present')
-
- gitlab_instance = gitlab_authentication(module)
+ module.fail_json(msg='value parameter is required for all variables in state present')
this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance)
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
index fea374cbf..8d2d75736 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
@@ -16,7 +16,6 @@ description:
author:
- "Werner Dijkerman (@dj-wasabi)"
requirements:
- - python >= 2.7
- python-gitlab >= 2.3.0
extends_documentation_fragment:
- community.general.auth_basic
@@ -44,7 +43,7 @@ options:
name:
description:
- The name of the branch that needs to be protected.
- - Can make use a wildcard character for like C(production/*) or just have C(main) or C(develop) as value.
+ - Can make use of a wildcard character, for example V(production/*), or just have V(main) or V(develop) as value.
required: true
type: str
merge_access_levels:
@@ -83,7 +82,7 @@ from ansible.module_utils.api import basic_auth_argument_spec
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, gitlab_authentication, gitlab
)
@@ -94,9 +93,9 @@ class GitlabProtectedBranch(object):
self._module = module
self.project = self.get_project(project)
self.ACCESS_LEVEL = {
- 'nobody': gitlab.NO_ACCESS,
- 'developer': gitlab.DEVELOPER_ACCESS,
- 'maintainer': gitlab.MAINTAINER_ACCESS
+ 'nobody': gitlab.const.NO_ACCESS,
+ 'developer': gitlab.const.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.const.MAINTAINER_ACCESS
}
def get_project(self, project_name):
@@ -164,7 +163,9 @@ def main():
],
supports_check_mode=True
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
project = module.params['project']
name = module.params['name']
@@ -177,7 +178,6 @@ def main():
module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
" Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
- gitlab_instance = gitlab_authentication(module)
this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance)
p_branch = this_gitlab.protected_branch_exist(name=name)
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_runner.py b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
index a41b135fc..e6163a6b6 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_runner.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
@@ -24,13 +24,12 @@ description:
To create shared runners, you need to ask your administrator to give you this token.
It can be found at U(https://$GITLAB_URL/admin/runners/).
notes:
- - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required.
+ - To create a new runner at least the O(api_token), O(description) and O(api_url) options are required.
- Runners need to have unique descriptions.
author:
- Samy Coenen (@SamyCoenen)
- Guillaume Martinez (@Lunik)
requirements:
- - python >= 2.7
- python-gitlab >= 1.5.0
extends_documentation_fragment:
- community.general.auth_basic
@@ -47,14 +46,16 @@ options:
group:
description:
- ID or full path of the group in the form group/subgroup.
- - Mutually exclusive with I(owned) and I(project).
+ - Mutually exclusive with O(owned) and O(project).
+ - Must be the group's numeric ID if O(registration_token) is not set and O(state=present).
type: str
version_added: '6.5.0'
project:
description:
- ID or full path of the project in the form of group/name.
- - Mutually exclusive with I(owned) since community.general 4.5.0.
- - Mutually exclusive with I(group).
+ - Mutually exclusive with O(owned) since community.general 4.5.0.
+ - Mutually exclusive with O(group).
+ - Must be the project's numeric ID if O(registration_token) is not set and O(state=present).
type: str
version_added: '3.7.0'
description:
@@ -73,23 +74,35 @@ options:
type: str
registration_token:
description:
- - The registration token is used to register new runners.
- - Required if I(state) is C(present).
+ - The registration token is used to register new runners before GitLab 16.0.
+ - Required if O(state=present) for GitLab < 16.0.
+ - If set, the runner will be created using the old runner creation workflow.
+ - If not set, the runner will be created using the new runner creation workflow, introduced in GitLab 16.0.
+ - If not set, requires python-gitlab >= 4.0.0.
type: str
owned:
description:
- Searches only runners available to the user when searching for existing, when false admin token required.
- - Mutually exclusive with I(project) since community.general 4.5.0.
- - Mutually exclusive with I(group).
+ - Mutually exclusive with O(project) since community.general 4.5.0.
+ - Mutually exclusive with O(group).
default: false
type: bool
version_added: 2.0.0
active:
description:
- Define if the runners is immediately active after creation.
+ - Mutually exclusive with O(paused).
required: false
default: true
type: bool
+ paused:
+ description:
+ - Define if the runner is active or paused after creation.
+ - Mutually exclusive with O(active).
+ required: false
+ default: false
+ type: bool
+ version_added: 8.1.0
locked:
description:
- Determines if the runner is locked or not.
@@ -99,23 +112,24 @@ options:
access_level:
description:
- Determines if a runner can pick up jobs only from protected branches.
- - If I(access_level_on_creation) is not explicitly set to C(true), this option is ignored on registration and
+ - If O(access_level_on_creation) is not explicitly set to V(true), this option is ignored on registration and
is only applied on updates.
- - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches.
- - If set to C(ref_protected), runner can pick up jobs only from protected branches.
- - The current default is C(ref_protected). This will change to no default in community.general 8.0.0.
- From that version on, if this option is not specified explicitly, GitLab will use C(not_protected)
- on creation, and the value set will not be changed on any updates.
+ - If set to V(not_protected), runner can pick up jobs from both protected and unprotected branches.
+ - If set to V(ref_protected), runner can pick up jobs only from protected branches.
+ - Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general 8.0.0.
+ If this option is not specified explicitly, GitLab will use V(not_protected) on creation, and the value set
+ will not be changed on any updates.
required: false
choices: ["not_protected", "ref_protected"]
type: str
access_level_on_creation:
description:
- Whether the runner should be registered with an access level or not.
- - If set to C(true), the value of I(access_level) is used for runner registration.
- - If set to C(false), GitLab registers the runner with the default access level.
- - The current default of this option is C(false). This default is deprecated and will change to C(true) in commuinty.general 7.0.0.
+ - If set to V(true), the value of O(access_level) is used for runner registration.
+ - If set to V(false), GitLab registers the runner with the default access level.
+ - The default of this option changed to V(true) in community.general 7.0.0. Before, it was V(false).
required: false
+ default: true
type: bool
version_added: 6.3.0
maximum_timeout:
@@ -205,15 +219,11 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs
)
-try:
- cmp # pylint: disable=used-before-assignment
-except NameError:
- def cmp(a, b):
- return (a > b) - (a < b)
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
class GitLabRunner(object):
@@ -238,27 +248,34 @@ class GitLabRunner(object):
changed = False
arguments = {
- 'active': options['active'],
'locked': options['locked'],
'run_untagged': options['run_untagged'],
'maximum_timeout': options['maximum_timeout'],
'tag_list': options['tag_list'],
}
+
+ if options.get('paused') is not None:
+ arguments['paused'] = options['paused']
+ else:
+ arguments['active'] = options['active']
+
if options.get('access_level') is not None:
arguments['access_level'] = options['access_level']
# Because we have already call userExists in main()
if self.runner_object is None:
arguments['description'] = description
- arguments['token'] = options['registration_token']
+ if options.get('registration_token') is not None:
+ arguments['token'] = options['registration_token']
+ elif options.get('group') is not None:
+ arguments['runner_type'] = 'group_type'
+ arguments['group_id'] = options['group']
+ elif options.get('project') is not None:
+ arguments['runner_type'] = 'project_type'
+ arguments['project_id'] = options['project']
+ else:
+ arguments['runner_type'] = 'instance_type'
access_level_on_creation = self._module.params['access_level_on_creation']
- if access_level_on_creation is None:
- message = "The option 'access_level_on_creation' is unspecified, so 'false' is assumed. "\
- "That means any value of 'access_level' is ignored and GitLab registers the runner with its default value. "\
- "The option 'access_level_on_creation' will switch to 'true' in community.general 7.0.0"
- self._module.deprecate(message, version='7.0.0', collection_name='community.general')
- access_level_on_creation = False
-
if not access_level_on_creation:
arguments.pop('access_level', None)
@@ -266,19 +283,17 @@ class GitLabRunner(object):
changed = True
else:
changed, runner = self.update_runner(self.runner_object, arguments)
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully updated the runner %s" % description)
+
+ try:
+ runner.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update runner: %s " % to_native(e))
self.runner_object = runner
- if changed:
- if self._module.check_mode:
- self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description)
-
- try:
- runner.save()
- except Exception as e:
- self._module.fail_json(msg="Failed to update runner: %s " % to_native(e))
- return True
- else:
- return False
+ return changed
'''
@param arguments Attributes of the runner
@@ -288,7 +303,12 @@ class GitLabRunner(object):
return True
try:
- runner = self._gitlab.runners.create(arguments)
+ if arguments.get('token') is not None:
+ runner = self._gitlab.runners.create(arguments)
+ elif LooseVersion(gitlab.__version__) < LooseVersion('4.0.0'):
+ self._module.fail_json(msg="New runner creation workflow requires python-gitlab 4.0.0 or higher")
+ else:
+ runner = self._gitlab.user.runners.create(arguments)
except (gitlab.exceptions.GitlabCreateError) as e:
self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
@@ -308,7 +328,7 @@ class GitLabRunner(object):
list1.sort()
list2 = arguments[arg_key]
list2.sort()
- if cmp(list1, list2):
+ if list1 != list2:
setattr(runner, arg_key, arguments[arg_key])
changed = True
else:
@@ -322,7 +342,7 @@ class GitLabRunner(object):
@param description Description of the runner
'''
def find_runner(self, description):
- runners = self._runners_endpoint(as_list=False)
+ runners = self._runners_endpoint(**list_all_kwargs)
for runner in runners:
# python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner
@@ -361,12 +381,13 @@ def main():
argument_spec.update(dict(
description=dict(type='str', required=True, aliases=["name"]),
active=dict(type='bool', default=True),
+ paused=dict(type='bool', default=False),
owned=dict(type='bool', default=False),
tag_list=dict(type='list', elements='str', default=[]),
run_untagged=dict(type='bool', default=True),
locked=dict(type='bool', default=False),
access_level=dict(type='str', choices=["not_protected", "ref_protected"]),
- access_level_on_creation=dict(type='bool'),
+ access_level_on_creation=dict(type='bool', default=True),
maximum_timeout=dict(type='int', default=3600),
registration_token=dict(type='str', no_log=True),
project=dict(type='str'),
@@ -385,6 +406,7 @@ def main():
['project', 'owned'],
['group', 'owned'],
['project', 'group'],
+ ['active', 'paused'],
],
required_together=[
['api_username', 'api_password'],
@@ -392,12 +414,11 @@ def main():
required_one_of=[
['api_username', 'api_token', 'api_oauth_token', 'api_job_token'],
],
- required_if=[
- ('state', 'present', ['registration_token']),
- ],
supports_check_mode=True,
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
state = module.params['state']
runner_description = module.params['description']
@@ -411,16 +432,6 @@ def main():
project = module.params['project']
group = module.params['group']
- if access_level is None:
- message = "The option 'access_level' is unspecified, so 'ref_protected' is assumed. "\
- "In order to align the module with GitLab's runner API, this option will lose "\
- "its default value in community.general 8.0.0. From that version on, you must set "\
- "this option to 'ref_protected' explicitly, if you want to have a protected runner, "\
- "otherwise GitLab's default access level gets applied, which is 'not_protected'"
- module.deprecate(message, version='8.0.0', collection_name='community.general')
- access_level = 'ref_protected'
-
- gitlab_instance = gitlab_authentication(module)
gitlab_project = None
gitlab_group = None
@@ -454,6 +465,8 @@ def main():
"access_level": access_level,
"maximum_timeout": maximum_timeout,
"registration_token": registration_token,
+ "group": group,
+ "project": project,
}):
module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs,
msg="Successfully created or updated the runner %s" % runner_description)
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_user.py b/ansible_collections/community/general/plugins/modules/gitlab_user.py
index 94f371316..6e5ab4ece 100644
--- a/ansible_collections/community/general/plugins/modules/gitlab_user.py
+++ b/ansible_collections/community/general/plugins/modules/gitlab_user.py
@@ -27,7 +27,6 @@ author:
- Lennert Mertens (@LennertMertens)
- Stef Graces (@stgrace)
requirements:
- - python >= 2.7
- python-gitlab python module
- administrator rights on the GitLab server
extends_documentation_fragment:
@@ -45,7 +44,7 @@ options:
name:
description:
- Name of the user you want to create.
- - Required only if C(state) is set to C(present).
+ - Required only if O(state=present).
type: str
username:
description:
@@ -66,7 +65,7 @@ options:
email:
description:
- The email that belongs to the user.
- - Required only if C(state) is set to C(present).
+ - Required only if O(state=present).
type: str
sshkey_name:
description:
@@ -123,7 +122,7 @@ options:
identities:
description:
- List of identities to be added/updated for this user.
- - To remove all other identities from this user, set I(overwrite_identities=true).
+ - To remove all other identities from this user, set O(overwrite_identities=true).
type: list
elements: dict
suboptions:
@@ -139,8 +138,8 @@ options:
overwrite_identities:
description:
- Overwrite identities with identities added in this module.
- - This means that all identities that the user has and that are not listed in I(identities) are removed from the user.
- - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list.
+ - This means that all identities that the user has and that are not listed in O(identities) are removed from the user.
+ - This is only done if a list is provided for O(identities). To remove all identities, provide an empty list.
type: bool
default: false
version_added: 3.3.0
@@ -151,7 +150,6 @@ EXAMPLES = '''
community.general.gitlab_user:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
- validate_certs: false
username: myusername
state: absent
@@ -191,7 +189,6 @@ EXAMPLES = '''
community.general.gitlab_user:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
- validate_certs: false
username: myusername
state: blocked
@@ -199,7 +196,6 @@ EXAMPLES = '''
community.general.gitlab_user:
api_url: https://gitlab.example.com/
api_token: "{{ access_token }}"
- validate_certs: false
username: myusername
state: unblocked
'''
@@ -234,7 +230,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.gitlab import (
- auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package
+ auth_argument_spec, find_group, gitlab_authentication, gitlab, list_all_kwargs
)
@@ -244,12 +240,12 @@ class GitLabUser(object):
self._gitlab = gitlab_instance
self.user_object = None
self.ACCESS_LEVEL = {
- 'guest': gitlab.GUEST_ACCESS,
- 'reporter': gitlab.REPORTER_ACCESS,
- 'developer': gitlab.DEVELOPER_ACCESS,
- 'master': gitlab.MAINTAINER_ACCESS,
- 'maintainer': gitlab.MAINTAINER_ACCESS,
- 'owner': gitlab.OWNER_ACCESS,
+ 'guest': gitlab.const.GUEST_ACCESS,
+ 'reporter': gitlab.const.REPORTER_ACCESS,
+ 'developer': gitlab.const.DEVELOPER_ACCESS,
+ 'master': gitlab.const.MAINTAINER_ACCESS,
+ 'maintainer': gitlab.const.MAINTAINER_ACCESS,
+ 'owner': gitlab.const.OWNER_ACCESS,
}
'''
@@ -349,9 +345,10 @@ class GitLabUser(object):
@param sshkey_name Name of the ssh key
'''
def ssh_key_exists(self, user, sshkey_name):
- keyList = map(lambda k: k.title, user.keys.list(all=True))
-
- return sshkey_name in keyList
+ return any(
+ k.title == sshkey_name
+ for k in user.keys.list(**list_all_kwargs)
+ )
'''
@param user User object
@@ -485,7 +482,7 @@ class GitLabUser(object):
'''
@param user User object
- @param identites List of identities to be added/updated
+ @param identities List of identities to be added/updated
@param overwrite_identities Overwrite user identities with identities passed to this module
'''
def add_identities(self, user, identities, overwrite_identities=False):
@@ -504,7 +501,7 @@ class GitLabUser(object):
'''
@param user User object
- @param identites List of identities to be added/updated
+ @param identities List of identities to be added/updated
'''
def delete_identities(self, user, identities):
changed = False
@@ -519,10 +516,13 @@ class GitLabUser(object):
@param username Username of the user
'''
def find_user(self, username):
- users = self._gitlab.users.list(search=username, all=True)
- for user in users:
- if (user.username == username):
- return user
+ return next(
+ (
+ user for user in self._gitlab.users.list(search=username, **list_all_kwargs)
+ if user.username == username
+ ),
+ None
+ )
'''
@param username Username of the user
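The rewritten find_user() drops the explicit loop in favour of next() over a generator with a default of None; a tiny self-contained illustration of that pattern (the sample data is made up):

users = [{"username": "alice"}, {"username": "bob"}]

def find_user(username):
    # first element matching the predicate, or None if the generator is exhausted
    return next((u for u in users if u["username"] == username), None)

print(find_user("bob"))    # {'username': 'bob'}
print(find_user("carol"))  # None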
@@ -616,7 +616,9 @@ def main():
('state', 'present', ['name', 'email']),
)
)
- ensure_gitlab_package(module)
+
+ # check prerequisites and connect to gitlab server
+ gitlab_instance = gitlab_authentication(module)
user_name = module.params['name']
state = module.params['state']
@@ -635,8 +637,6 @@ def main():
user_identities = module.params['identities']
overwrite_identities = module.params['overwrite_identities']
- gitlab_instance = gitlab_authentication(module)
-
gitlab_user = GitLabUser(module, gitlab_instance)
user_exists = gitlab_user.exists_user(user_username)
if user_exists:
diff --git a/ansible_collections/community/general/plugins/modules/grove.py b/ansible_collections/community/general/plugins/modules/grove.py
index b3e0508ff..b50546b4d 100644
--- a/ansible_collections/community/general/plugins/modules/grove.py
+++ b/ansible_collections/community/general/plugins/modules/grove.py
@@ -39,7 +39,7 @@ options:
type: str
description:
- Message content.
- - The alias I(message) is deprecated and will be removed in community.general 4.0.0.
+ - The alias O(ignore:message) has been removed in community.general 4.0.0.
required: true
url:
type: str
@@ -53,7 +53,7 @@ options:
required: false
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: true
type: bool
diff --git a/ansible_collections/community/general/plugins/modules/hana_query.py b/ansible_collections/community/general/plugins/modules/hana_query.py
deleted file mode 100644
index 0b12e9935..000000000
--- a/ansible_collections/community/general/plugins/modules/hana_query.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2021, Rainer Leber <rainerleber@gmail.com>
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: hana_query
-short_description: Execute SQL on HANA
-version_added: 3.2.0
-description: This module executes SQL statements on HANA with hdbsql.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- sid:
- description: The system ID.
- type: str
- required: true
- instance:
- description: The instance number.
- type: str
- required: true
- user:
- description: A dedicated username. The user could be also in hdbuserstore. Defaults to C(SYSTEM).
- type: str
- default: SYSTEM
- userstore:
- description: If C(true) the user must be in hdbuserstore.
- type: bool
- default: false
- version_added: 3.5.0
- password:
- description:
- - The password to connect to the database.
- - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should
- be used whenever possible, as command line arguments can be seen by other users
- on the same machine."
- type: str
- autocommit:
- description: Autocommit the statement.
- type: bool
- default: true
- host:
- description: The Host IP address. The port can be defined as well.
- type: str
- database:
- description: Define the database on which to connect.
- type: str
- encrypted:
- description: Use encrypted connection. Defaults to C(false).
- type: bool
- default: false
- filepath:
- description:
- - One or more files each containing one SQL query to run.
- - Must be a string or list containing strings.
- type: list
- elements: path
- query:
- description:
- - SQL query to run.
- - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list.
- It is better to supply a one-element list instead to avoid mangled input.
- type: list
- elements: str
-author:
- - Rainer Leber (@rainerleber)
-'''
-
-EXAMPLES = r'''
-- name: Simple select query
- community.general.hana_query:
- sid: "hdb"
- instance: "01"
- password: "Test123"
- query: "select user_name from users"
-
-- name: Run several queries
- community.general.hana_query:
- sid: "hdb"
- instance: "01"
- password: "Test123"
- query:
- - "select user_name from users;"
- - select * from SYSTEM;
- host: "localhost"
- autocommit: false
-
-- name: Run several queries from file
- community.general.hana_query:
- sid: "hdb"
- instance: "01"
- password: "Test123"
- filepath:
- - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt
- - /tmp/HANA.txt
- host: "localhost"
-
-- name: Run several queries from user store
- community.general.hana_query:
- sid: "hdb"
- instance: "01"
- user: hdbstoreuser
- userstore: true
- query:
- - "select user_name from users;"
- - select * from users;
- autocommit: false
-'''
-
-RETURN = r'''
-query_result:
- description: List containing results of all queries executed (one sublist for every query).
- returned: on success
- type: list
- elements: list
- sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]]
-'''
-
-import csv
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import StringIO
-from ansible.module_utils.common.text.converters import to_native
-
-
-def csv_to_list(rawcsv):
- reader_raw = csv.DictReader(StringIO(rawcsv))
- reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw]
- return list(reader)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- sid=dict(type='str', required=True),
- instance=dict(type='str', required=True),
- encrypted=dict(type='bool', default=False),
- host=dict(type='str', required=False),
- user=dict(type='str', default="SYSTEM"),
- userstore=dict(type='bool', default=False),
- password=dict(type='str', no_log=True),
- database=dict(type='str', required=False),
- query=dict(type='list', elements='str', required=False),
- filepath=dict(type='list', elements='path', required=False),
- autocommit=dict(type='bool', default=True),
- ),
- required_one_of=[('query', 'filepath')],
- required_if=[('userstore', False, ['password'])],
- supports_check_mode=False,
- )
- rc, out, err, out_raw = [0, [], "", ""]
-
- params = module.params
-
- sid = (params['sid']).upper()
- instance = params['instance']
- user = params['user']
- userstore = params['userstore']
- password = params['password']
- autocommit = params['autocommit']
- host = params['host']
- database = params['database']
- encrypted = params['encrypted']
-
- filepath = params['filepath']
- query = params['query']
-
- bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance)
-
- try:
- command = [module.get_bin_path(bin_path, required=True)]
- except Exception as e:
- module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". Please check SID and instance number: "{1}"'.format(bin_path, to_native(e)))
-
- if encrypted is True:
- command.extend(['-attemptencrypt'])
- if autocommit is False:
- command.extend(['-z'])
- if host is not None:
- command.extend(['-n', host])
- if database is not None:
- command.extend(['-d', database])
- # -x Suppresses additional output, such as the number of selected rows in a result set.
- if userstore:
- command.extend(['-x', '-U', user])
- else:
- command.extend(['-x', '-i', instance, '-u', user, '-p', password])
-
- if filepath is not None:
- command.extend(['-I'])
- for p in filepath:
- # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt,
- # iterates through files and append the output to var out.
- query_command = command + [p]
- (rc, out_raw, err) = module.run_command(query_command)
- out.append(csv_to_list(out_raw))
- if query is not None:
- for q in query:
- # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users",
- # iterates through multiple commands and append the output to var out.
- query_command = command + [q]
- (rc, out_raw, err) = module.run_command(query_command)
- out.append(csv_to_list(out_raw))
- changed = True
-
- module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/haproxy.py b/ansible_collections/community/general/plugins/modules/haproxy.py
index 56f987d80..05f52d55c 100644
--- a/ansible_collections/community/general/plugins/modules/haproxy.py
+++ b/ansible_collections/community/general/plugins/modules/haproxy.py
@@ -65,7 +65,7 @@ options:
state:
description:
- Desired state of the provided backend host.
- - Note that C(drain) state was added in version 2.4.
+ - Note that V(drain) state was added in version 2.4.
- It is supported only by HAProxy version 1.5 or later,
- When used on versions < 1.5, it will be ignored.
type: str
@@ -73,13 +73,13 @@ options:
choices: [ disabled, drain, enabled ]
agent:
description:
- - Disable/enable agent checks (depending on I(state) value).
+ - Disable/enable agent checks (depending on O(state) value).
type: bool
default: false
version_added: 1.0.0
health:
description:
- - Disable/enable health checks (depending on I(state) value).
+ - Disable/enable health checks (depending on O(state) value).
type: bool
default: false
version_added: "1.0.0"
@@ -90,8 +90,8 @@ options:
default: false
wait:
description:
- - Wait until the server reports a status of C(UP) when I(state=enabled),
- status of C(MAINT) when I(state=disabled) or status of C(DRAIN) when I(state=drain).
+ - Wait until the server reports a status of C(UP) when O(state=enabled),
+ status of C(MAINT) when O(state=disabled) or status of C(DRAIN) when O(state=drain).
type: bool
default: false
wait_interval:
@@ -107,7 +107,7 @@ options:
weight:
description:
- The value passed in argument.
- - If the value ends with the C(%) sign, then the new weight will be
+ - If the value ends with the V(%) sign, then the new weight will be
relative to the initially configured weight.
- Relative weights are only permitted between 0 and 100% and absolute
weights are permitted between 0 and 256.
diff --git a/ansible_collections/community/general/plugins/modules/heroku_collaborator.py b/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
index e7b0de3f9..e07ae333d 100644
--- a/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
+++ b/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
@@ -14,9 +14,9 @@ module: heroku_collaborator
short_description: Add or delete app collaborators on Heroku
description:
- Manages collaborators for Heroku apps.
- - If set to C(present) and heroku user is already collaborator, then do nothing.
- - If set to C(present) and heroku user is not collaborator, then add user to app.
- - If set to C(absent) and heroku user is collaborator, then delete user from app.
+ - If set to V(present) and the Heroku user is already a collaborator, then do nothing.
+ - If set to V(present) and the Heroku user is not a collaborator, then add the user to the app.
+ - If set to V(absent) and the Heroku user is a collaborator, then delete the user from the app.
author:
- Marcel Arns (@marns93)
requirements:
@@ -56,8 +56,8 @@ options:
choices: ["present", "absent"]
default: "present"
notes:
- - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) env variable can be used instead setting C(api_key).
- - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
+ - E(HEROKU_API_KEY) and E(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting O(api_key).
+ - If you use C(check_mode), you can also pass the C(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/hg.py b/ansible_collections/community/general/plugins/modules/hg.py
index dbbd504b4..4b6b7c433 100644
--- a/ansible_collections/community/general/plugins/modules/hg.py
+++ b/ansible_collections/community/general/plugins/modules/hg.py
@@ -43,8 +43,7 @@ options:
type: str
force:
description:
- - Discards uncommitted changes. Runs C(hg update -C). Prior to
- 1.9, the default was C(true).
+ - Discards uncommitted changes. Runs C(hg update -C).
type: bool
default: false
purge:
@@ -54,12 +53,12 @@ options:
default: false
update:
description:
- - If C(false), do not retrieve new revisions from the origin repository
+ - If V(false), do not retrieve new revisions from the origin repository
type: bool
default: true
clone:
description:
- - If C(false), do not clone the repository if it does not exist locally.
+ - If V(false), do not clone the repository if it does not exist locally.
type: bool
default: true
executable:
diff --git a/ansible_collections/community/general/plugins/modules/hipchat.py b/ansible_collections/community/general/plugins/modules/hipchat.py
index 11b5fb735..83e253679 100644
--- a/ansible_collections/community/general/plugins/modules/hipchat.py
+++ b/ansible_collections/community/general/plugins/modules/hipchat.py
@@ -64,7 +64,7 @@ options:
default: true
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/homebrew.py b/ansible_collections/community/general/plugins/modules/homebrew.py
index 7592f95a4..5d471797a 100644
--- a/ansible_collections/community/general/plugins/modules/homebrew.py
+++ b/ansible_collections/community/general/plugins/modules/homebrew.py
@@ -42,9 +42,9 @@ options:
elements: str
path:
description:
- - "A C(:) separated list of paths to search for C(brew) executable.
- Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command,
- providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ - "A V(:) separated list of paths to search for C(brew) executable.
+ Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of the C(brew) command,
+ providing an alternative C(brew) path enables managing a different set of packages in an alternative location in the system."
default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin'
type: path
state:
@@ -78,7 +78,7 @@ options:
version_added: '0.2.0'
notes:
- When used with a C(loop:) each package will be processed individually,
- it is much more efficient to pass the list directly to the I(name) option.
+ it is much more efficient to pass the list directly to the O(name) option.
'''
EXAMPLES = '''
@@ -87,7 +87,7 @@ EXAMPLES = '''
name: foo
state: present
-# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+# Install formula foo with 'brew' in alternate path (/my/other/location/bin)
- community.general.homebrew:
name: foo
path: /my/other/location/bin
@@ -165,6 +165,7 @@ changed_pkgs:
version_added: '0.2.0'
'''
+import json
import os.path
import re
@@ -184,6 +185,10 @@ def _create_regex_group_complement(s):
chars = filter(None, (line.split('#')[0].strip() for line in lines))
group = r'[^' + r''.join(chars) + r']'
return re.compile(group)
+
+
+def _check_package_in_json(json_output, package_type):
+ return bool(json_output.get(package_type, []) and json_output[package_type][0].get("installed"))
# /utils ------------------------------------------------------------------ }}}
@@ -479,17 +484,17 @@ class Homebrew(object):
cmd = [
"{brew_path}".format(brew_path=self.brew_path),
"info",
+ "--json=v2",
self.current_package,
]
rc, out, err = self.module.run_command(cmd)
- for line in out.split('\n'):
- if (
- re.search(r'Built from source', line)
- or re.search(r'Poured from bottle', line)
- ):
- return True
-
- return False
+ if err:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ data = json.loads(out)
+
+ return _check_package_in_json(data, "formulae") or _check_package_in_json(data, "casks")
def _current_package_is_outdated(self):
if not self.valid_package(self.current_package):
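With the switch to C(brew info --json=v2), installation state is read from structured JSON instead of grepping for "Built from source" / "Poured from bottle". A minimal sketch of what the new _check_package_in_json() helper looks at; the JSON below is a trimmed, illustrative shape rather than verbatim brew output:

import json

def _check_package_in_json(json_output, package_type):
    # non-empty list for the package type, and its first entry reports an "installed" record
    return bool(json_output.get(package_type, []) and json_output[package_type][0].get("installed"))

sample = json.loads('{"formulae": [{"name": "foo", "installed": [{"version": "1.2.3"}]}], "casks": []}')

print(_check_package_in_json(sample, "formulae"))  # True: the formula has an installed entry
print(_check_package_in_json(sample, "casks"))     # False: nothing listed under casks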
diff --git a/ansible_collections/community/general/plugins/modules/homebrew_tap.py b/ansible_collections/community/general/plugins/modules/homebrew_tap.py
index b230dbb34..151d09d32 100644
--- a/ansible_collections/community/general/plugins/modules/homebrew_tap.py
+++ b/ansible_collections/community/general/plugins/modules/homebrew_tap.py
@@ -42,7 +42,7 @@ options:
- The optional git URL of the repository to tap. The URL is not
assumed to be on GitHub, and the protocol doesn't have to be HTTP.
Any location and protocol that git can handle is fine.
- - I(name) option may not be a list of multiple taps (but a single
+ - O(name) option may not be a list of multiple taps (but a single
tap instead) when this option is provided.
required: false
type: str
@@ -55,7 +55,7 @@ options:
type: str
path:
description:
- - "A C(:) separated list of paths to search for C(brew) executable."
+ - "A V(:) separated list of paths to search for C(brew) executable."
default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin'
type: path
version_added: '2.1.0'
diff --git a/ansible_collections/community/general/plugins/modules/homectl.py b/ansible_collections/community/general/plugins/modules/homectl.py
index 301e388d3..ca4c19a87 100644
--- a/ansible_collections/community/general/plugins/modules/homectl.py
+++ b/ansible_collections/community/general/plugins/modules/homectl.py
@@ -37,7 +37,7 @@ options:
- Homed requires this value to be in cleartext on user creation and updating a user.
- The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using crypt.
- See U(https://systemd.io/USER_RECORD/).
- - This is required for I(state=present). When an existing user is updated this is checked against the stored hash in homed.
+ - This is required for O(state=present). When an existing user is updated this is checked against the stored hash in homed.
type: str
state:
description:
@@ -55,11 +55,11 @@ options:
disksize:
description:
- The intended home directory disk space.
- - Human readable value such as C(10G), C(10M), or C(10B).
+ - Human readable value such as V(10G), V(10M), or V(10B).
type: str
resize:
description:
- - When used with I(disksize) this will attempt to resize the home directory immediately.
+ - When used with O(disksize) this will attempt to resize the home directory immediately.
default: false
type: bool
realname:
@@ -90,7 +90,7 @@ options:
description:
- Path to use as home directory for the user.
- This is the directory the user's home directory is mounted to while the user is logged in.
- - This is not where the user's data is actually stored, see I(imagepath) for that.
+ - This is not where the user's data is actually stored, see O(imagepath) for that.
- Only used when a user is first created.
type: path
imagepath:
@@ -102,25 +102,25 @@ options:
uid:
description:
- Sets the UID of the user.
- - If using I(gid) homed requires the value to be the same.
+ - If using O(gid) homed requires the value to be the same.
- Only used when a user is first created.
type: int
gid:
description:
- Sets the gid of the user.
- - If using I(uid) homed requires the value to be the same.
+ - If using O(uid) homed requires the value to be the same.
- Only used when a user is first created.
type: int
mountopts:
description:
- String separated by comma each indicating mount options for a users home directory.
- - Valid options are C(nosuid), C(nodev) or C(noexec).
- - Homed by default uses C(nodev) and C(nosuid) while C(noexec) is off.
+ - Valid options are V(nosuid), V(nodev) or V(noexec).
+ - Homed by default uses V(nodev) and V(nosuid) while V(noexec) is off.
type: str
umask:
description:
- Sets the umask for the user's login sessions
- - Value from C(0000) to C(0777).
+ - Value from V(0000) to V(0777).
type: int
memberof:
description:
@@ -132,13 +132,13 @@ options:
description:
- The absolute path to the skeleton directory to populate a new home directory from.
- This is only used when a home directory is first created.
- - If not specified homed by default uses C(/etc/skel).
+ - If not specified homed by default uses V(/etc/skel).
aliases: [ 'skel' ]
type: path
shell:
description:
- Shell binary to use for terminal logins of given user.
- - If not specified homed by default uses C(/bin/bash).
+ - If not specified homed by default uses V(/bin/bash).
type: str
environment:
description:
@@ -151,7 +151,7 @@ options:
timezone:
description:
- Preferred timezone to use for the user.
- - Should be a tzdata compatible location string such as C(America/New_York).
+ - Should be a tzdata compatible location string such as V(America/New_York).
type: str
locked:
description:
@@ -160,7 +160,7 @@ options:
language:
description:
- The preferred language/locale for the user.
- - This should be in a format compatible with the C($LANG) environment variable.
+ - This should be in a format compatible with the E(LANG) environment variable.
type: str
passwordhint:
description:
@@ -393,7 +393,7 @@ class Homectl(object):
user_metadata.pop('status', None)
# Let last change Usec be updated by homed when command runs.
user_metadata.pop('lastChangeUSec', None)
- # Now only change fields that are called on leaving whats currently in the record intact.
+ # Now only change fields that are called on leaving what's currently in the record intact.
record = user_metadata
record['userName'] = self.name
@@ -439,7 +439,7 @@ class Homectl(object):
self.result['changed'] = True
if self.disksize:
- # convert humand readble to bytes
+ # convert human readable to bytes
if self.disksize != record.get('diskSize'):
record['diskSize'] = human_to_bytes(self.disksize)
self.result['changed'] = True
diff --git a/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py b/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
index 820e4538e..cf52745ac 100644
--- a/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
+++ b/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
@@ -52,7 +52,7 @@ options:
default: "https://api.honeybadger.io/v1/deploys"
validate_certs:
description:
- - If C(false), SSL certificates for the target url will not be validated. This should only be used
+ - If V(false), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/hpilo_info.py b/ansible_collections/community/general/plugins/modules/hpilo_info.py
index cef6597e4..d329764b4 100644
--- a/ansible_collections/community/general/plugins/modules/hpilo_info.py
+++ b/ansible_collections/community/general/plugins/modules/hpilo_info.py
@@ -19,8 +19,6 @@ description:
These information includes hardware and network related information useful
for provisioning (e.g. macaddress, uuid).
- This module requires the C(hpilo) python module.
-- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -125,7 +123,7 @@ hw_uuid:
host_power_status:
description:
- Power status of host.
- - Will be one of C(ON), C(OFF) and C(UNKNOWN).
+ - Will be one of V(ON), V(OFF) and V(UNKNOWN).
returned: always
type: str
sample: "ON"
diff --git a/ansible_collections/community/general/plugins/modules/htpasswd.py b/ansible_collections/community/general/plugins/modules/htpasswd.py
index 180b02073..9633ce2fb 100644
--- a/ansible_collections/community/general/plugins/modules/htpasswd.py
+++ b/ansible_collections/community/general/plugins/modules/htpasswd.py
@@ -26,51 +26,53 @@ options:
required: true
aliases: [ dest, destfile ]
description:
- - Path to the file that contains the usernames and passwords
+ - Path to the file that contains the usernames and passwords.
name:
type: str
required: true
aliases: [ username ]
description:
- - User name to add or remove
+ - User name to add or remove.
password:
type: str
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
- crypt_scheme:
+ hash_scheme:
type: str
required: false
default: "apr_md5_crypt"
description:
- - Encryption scheme to be used. As well as the four choices listed
+ - Hashing scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
- C(portable_apache22) and C(host_apache24); or C(md5_crypt) and C(sha256_crypt),
- which are Linux passwd hashes. Only some schemes in addition to
+ V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt),
+ which are Linux passwd hashes. Only some schemes in addition to
the four choices below will be compatible with Apache or Nginx, and
supported schemes depend on passlib version and its dependencies.
- See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme).
- - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext).'
+ - 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).'
+ aliases: [crypt_scheme]
state:
type: str
required: false
choices: [ present, absent ]
default: "present"
description:
- - Whether the user entry should be present or not
+ - Whether the user entry should be present or not.
create:
required: false
type: bool
default: true
description:
- - Used with I(state=present). If specified, the file will be created
- if it does not already exist. If set to C(false), will fail if the
- file does not exist
+ - Used with O(state=present). If V(true), the file will be created
+ if it does not exist. Conversely, if set to V(false) and the file
+ does not exist, the module will fail.
notes:
- - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
+ - "This module depends on the C(passlib) Python library, which needs to be installed on all target systems."
+ - "On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib)."
+ - "On Debian, Ubuntu: install C(python3-passlib)."
+ - "On RHEL or CentOS: Enable EPEL, then install C(python-passlib)."
requirements: [ passlib>=1.6 ]
author: "Ansible Core Team"
extends_documentation_fragment:
@@ -99,28 +101,22 @@ EXAMPLES = """
path: /etc/mail/passwords
name: alex
password: oedu2eGh
- crypt_scheme: md5_crypt
+ hash_scheme: md5_crypt
"""
import os
import tempfile
-import traceback
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils import deps
from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-PASSLIB_IMP_ERR = None
-try:
+with deps.declare("passlib"):
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
- import passlib
-except ImportError:
- PASSLIB_IMP_ERR = traceback.format_exc()
- passlib_installed = False
-else:
- passlib_installed = True
+
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
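The passlib import guard above now goes through the collection's deps helper instead of a hand-rolled try/except with traceback capture. A minimal sketch of the declare/validate pattern as used in this patch; the argument spec and exit payload are illustrative only:

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils import deps

with deps.declare("passlib"):
    # an ImportError here is recorded under the name "passlib" instead of raising
    from passlib.apache import HtpasswdFile

def main():
    module = AnsibleModule(argument_spec=dict(path=dict(type='path', required=True)))
    deps.validate(module)  # fail_json with a missing-library message if the import above failed
    ht = HtpasswdFile(module.params['path'], new=False)
    module.exit_json(changed=False, users=ht.users())

if __name__ == '__main__':
    main()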
@@ -131,50 +127,34 @@ def create_missing_directories(dest):
os.makedirs(destpath)
-def present(dest, username, password, crypt_scheme, create, check_mode):
+def present(dest, username, password, hash_scheme, create, check_mode):
""" Ensures user is present
Returns (msg, changed) """
- if crypt_scheme in apache_hashes:
+ if hash_scheme in apache_hashes:
context = htpasswd_context
else:
- context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
+ context = CryptContext(schemes=[hash_scheme] + apache_hashes)
if not os.path.exists(dest):
if not create:
raise ValueError('Destination %s does not exist' % dest)
if check_mode:
return ("Create %s" % dest, True)
create_missing_directories(dest)
- if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
- ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
- else:
- ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
- if getattr(ht, 'set_password', None):
- ht.set_password(username, password)
- else:
- ht.update(username, password)
+ ht = HtpasswdFile(dest, new=True, default_scheme=hash_scheme, context=context)
+ ht.set_password(username, password)
ht.save()
return ("Created %s and added %s" % (dest, username), True)
else:
- if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
- ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
- else:
- ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
+ ht = HtpasswdFile(dest, new=False, default_scheme=hash_scheme, context=context)
- found = None
- if getattr(ht, 'check_password', None):
- found = ht.check_password(username, password)
- else:
- found = ht.verify(username, password)
+ found = ht.check_password(username, password)
if found:
return ("%s already present" % username, False)
else:
if not check_mode:
- if getattr(ht, 'set_password', None):
- ht.set_password(username, password)
- else:
- ht.update(username, password)
+ ht.set_password(username, password)
ht.save()
return ("Add/update %s" % username, True)
@@ -183,10 +163,7 @@ def absent(dest, username, check_mode):
""" Ensures user is absent
Returns (msg, changed) """
- if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
- ht = HtpasswdFile(dest, new=False)
- else:
- ht = HtpasswdFile(dest)
+ ht = HtpasswdFile(dest, new=False)
if username not in ht.users():
return ("%s not present" % username, False)
@@ -215,7 +192,7 @@ def main():
path=dict(type='path', required=True, aliases=["dest", "destfile"]),
name=dict(type='str', required=True, aliases=["username"]),
password=dict(type='str', required=False, default=None, no_log=True),
- crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"),
+ hash_scheme=dict(type='str', required=False, default="apr_md5_crypt", aliases=["crypt_scheme"]),
state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
create=dict(type='bool', default=True),
@@ -227,25 +204,18 @@ def main():
path = module.params['path']
username = module.params['name']
password = module.params['password']
- crypt_scheme = module.params['crypt_scheme']
+ hash_scheme = module.params['hash_scheme']
state = module.params['state']
create = module.params['create']
check_mode = module.check_mode
- if not passlib_installed:
- module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
+ deps.validate(module)
+ # TODO double check if this hack below is still needed.
# Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
try:
- f = open(path, "r")
- except IOError:
- # No preexisting file to remove blank lines from
- f = None
- else:
- try:
+ with open(path, "r") as f:
lines = f.readlines()
- finally:
- f.close()
# If the file gets edited, it returns true, so only edit the file if it has blank lines
strip = False
@@ -259,15 +229,16 @@ def main():
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
- f = open(path, "w")
- try:
- [f.write(line) for line in lines if line.strip()]
- finally:
- f.close()
+ with open(path, "w") as f:
+ f.writelines(line for line in lines if line.strip())
+
+ except IOError:
+ # No preexisting file to remove blank lines from
+ pass
try:
if state == 'present':
- (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+ (msg, changed) = present(path, username, password, hash_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
module.exit_json(msg="%s not present" % username,
diff --git a/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py b/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
index 434db242f..9ba95dc96 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
@@ -73,8 +73,8 @@ options:
name:
description:
- Specifies the ECS name. Value requirements consists of 1 to 64
- characters, including letters, digits, underscores C(_), hyphens
- (-), periods (.).
+ characters, including letters, digits, underscores (V(_)), hyphens
+ (V(-)), periods (V(.)).
type: str
required: true
nics:
@@ -306,8 +306,8 @@ RETURN = '''
name:
description:
- Specifies the ECS name. Value requirements "Consists of 1 to 64
- characters, including letters, digits, underscores C(_), hyphens
- (-), periods (.)".
+ characters, including letters, digits, underscores (V(_)), hyphens
+ (V(-)), periods (V(.)).".
type: str
returned: success
nics:
diff --git a/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
index 88207d3f9..bb983fba7 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
@@ -45,7 +45,7 @@ options:
description:
- Name of the topic to be created. The topic name is a string of 1
to 256 characters. It must contain upper- or lower-case letters,
- digits, hyphens (-), and underscores C(_), and must start with a
+ digits, hyphens (V(-)), and underscores (V(_)), and must start with a
letter or digit.
type: str
required: true
@@ -85,7 +85,7 @@ name:
description:
- Name of the topic to be created. The topic name is a string of 1
to 256 characters. It must contain upper- or lower-case letters,
- digits, hyphens (-), and underscores C(_), and must start with a
+ digits, hyphens (V(-)), and underscores (V(_)), and must start with a
letter or digit.
returned: success
type: str
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
index 9fc0361b3..5c4431940 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
@@ -75,7 +75,7 @@ options:
description:
- Specifies the bandwidth name. The value is a string of 1
to 64 characters that can contain letters, digits,
- underscores C(_), hyphens (-), and periods (.).
+ underscores (V(_)), hyphens (V(-)), and periods (V(.)).
type: str
required: true
size:
@@ -187,7 +187,7 @@ RETURN = '''
description:
- Specifies the bandwidth name. The value is a string of 1
to 64 characters that can contain letters, digits,
- underscores C(_), hyphens (-), and periods (.).
+ underscores (V(_)), hyphens (V(-)), and periods (V(.)).
type: str
returned: success
size:
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
index c57ddc670..95e759f6f 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
@@ -19,8 +19,8 @@ description:
- vpc private ip management.
short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
notes:
- - If I(id) option is provided, it takes precedence over I(subnet_id), I(ip_address) for private ip selection.
- - I(subnet_id), I(ip_address) are used for private ip selection. If more than one private ip with this options exists, execution is aborted.
+ - If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private ip selection.
+ - O(subnet_id), O(ip_address) are used for private ip selection. If more than one private ip with these options exists, execution is aborted.
- No parameter support updating. If one of option is changed, the module will create a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
index 1612cac50..091b49b0c 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
@@ -19,8 +19,8 @@ description:
- vpc route management.
short_description: Creates a resource of Vpc/Route in Huawei Cloud
notes:
- - If I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection.
- - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with this options exists, execution is aborted.
+ - If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection.
+ - O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with these options exists, execution is aborted.
- No parameter support updating. If one of option is changed, the module will create a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
index c210b912d..aa65e801c 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
@@ -19,9 +19,9 @@ description:
- vpc security group management.
short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
notes:
- - If I(id) option is provided, it takes precedence over I(name),
- I(enterprise_project_id) and I(vpc_id) for security group selection.
- - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
+ - If O(id) option is provided, it takes precedence over O(name),
+ O(enterprise_project_id), and O(vpc_id) for security group selection.
+ - O(name), O(enterprise_project_id) and O(vpc_id) are used for security
group selection. If more than one security group with this options exists,
execution is aborted.
- No parameter support updating. If one of option is changed, the module
@@ -45,8 +45,8 @@ options:
name:
description:
- Specifies the security group name. The value is a string of 1 to
- 64 characters that can contain letters, digits, underscores C(_),
- hyphens (-), and periods (.).
+ 64 characters that can contain letters, digits, underscores (V(_)),
+ hyphens (V(-)), and periods (V(.)).
type: str
required: true
enterprise_project_id:
@@ -79,8 +79,8 @@ RETURN = '''
name:
description:
- Specifies the security group name. The value is a string of 1 to
- 64 characters that can contain letters, digits, underscores C(_),
- hyphens (-), and periods (.).
+ 64 characters that can contain letters, digits, underscores (V(_)),
+ hyphens (V(-)), and periods (V(.)).
type: str
returned: success
enterprise_project_id:
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
index bfb5d6a61..899647e8c 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
@@ -19,9 +19,9 @@ description:
- vpc security group management.
short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
notes:
- - If I(id) option is provided, it takes precedence over
- I(enterprise_project_id) for security group rule selection.
- - I(security_group_id) is used for security group rule selection. If more
+ - If O(id) option is provided, it takes precedence over
+ O(security_group_id) for security group rule selection.
+ - O(security_group_id) is used for security group rule selection. If more
than one security group rule with this options exists, execution is
aborted.
- No parameter support updating. If one of option is changed, the module
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
index 7fb107f53..7ba747330 100644
--- a/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
@@ -66,8 +66,8 @@ options:
name:
description:
- Specifies the subnet name. The value is a string of 1 to 64
- characters that can contain letters, digits, underscores C(_),
- hyphens (-), and periods (.).
+ characters that can contain letters, digits, underscores (V(_)),
+ hyphens (V(-)), and periods (V(.)).
type: str
required: true
vpc_id:
@@ -137,8 +137,8 @@ RETURN = '''
name:
description:
- Specifies the subnet name. The value is a string of 1 to 64
- characters that can contain letters, digits, underscores C(_),
- hyphens (-), and periods (.).
+ characters that can contain letters, digits, underscores (V(_)),
+ hyphens (V(-)), and periods (V(.)).
type: str
returned: success
vpc_id:
diff --git a/ansible_collections/community/general/plugins/modules/icinga2_feature.py b/ansible_collections/community/general/plugins/modules/icinga2_feature.py
index 6e6bc5416..0c79f6cba 100644
--- a/ansible_collections/community/general/plugins/modules/icinga2_feature.py
+++ b/ansible_collections/community/general/plugins/modules/icinga2_feature.py
@@ -37,10 +37,10 @@ options:
state:
type: str
description:
- - If set to C(present) and feature is disabled, then feature is enabled.
- - If set to C(present) and feature is already enabled, then nothing is changed.
- - If set to C(absent) and feature is enabled, then feature is disabled.
- - If set to C(absent) and feature is already disabled, then nothing is changed.
+ - If set to V(present) and feature is disabled, then feature is enabled.
+ - If set to V(present) and feature is already enabled, then nothing is changed.
+ - If set to V(absent) and feature is enabled, then feature is disabled.
+ - If set to V(absent) and feature is already disabled, then nothing is changed.
choices: [ "present", "absent" ]
default: present
'''
diff --git a/ansible_collections/community/general/plugins/modules/icinga2_host.py b/ansible_collections/community/general/plugins/modules/icinga2_host.py
index 7f25c55d9..ec04d8df7 100644
--- a/ansible_collections/community/general/plugins/modules/icinga2_host.py
+++ b/ansible_collections/community/general/plugins/modules/icinga2_host.py
@@ -31,13 +31,13 @@ options:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
use_proxy:
description:
- - If C(false), it will not use a proxy, even if one is defined in
+ - If V(false), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
type: bool
default: true
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
@@ -45,12 +45,12 @@ options:
type: str
description:
- The username for use in HTTP basic authentication.
- - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ - This parameter can be used without O(url_password) for sites that allow empty passwords.
url_password:
type: str
description:
- The password for use in HTTP basic authentication.
- - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ - If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used.
force_basic_auth:
description:
- httplib2, the library used by the uri module only sends authentication information when a webservice
@@ -64,12 +64,12 @@ options:
description:
- PEM formatted certificate chain file to be used for SSL client
authentication. This file can also include the key as well, and if
- the key is included, C(client_key) is not required.
+ the key is included, O(client_key) is not required.
client_key:
type: path
description:
- PEM formatted file that contains your private key to be used for SSL
- client authentication. If C(client_cert) contains both the certificate
+ client authentication. If O(client_cert) contains both the certificate
and key, this option is not required.
state:
type: str
@@ -101,12 +101,12 @@ options:
type: str
description:
- The name used to display the host.
- - If not specified, it defaults to the value of the I(name) parameter.
+ - If not specified, it defaults to the value of the O(name) parameter.
ip:
type: str
description:
- The IP address of the host.
- required: true
+ - This is no longer required since community.general 8.0.0.
variables:
type: dict
description:
@@ -243,7 +243,7 @@ def main():
template=dict(default=None),
check_command=dict(default="hostalive"),
display_name=dict(default=None),
- ip=dict(required=True),
+ ip=dict(),
variables=dict(type='dict', default=None),
)
@@ -306,7 +306,7 @@ def main():
module.exit_json(changed=False, name=name, data=data)
# Template attribute is not allowed in modification
- del data['attrs']['templates']
+ del data['templates']
ret = icinga.modify(name, data)
diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
index cc47e62d2..0388bf00f 100644
--- a/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
+++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
@@ -33,9 +33,9 @@ options:
required: true
description:
- List of commands to execute on iDRAC.
- - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
- I(SetSystemAttributes) are mutually exclusive commands when C(category)
- is I(Manager).
+ - V(SetManagerAttributes), V(SetLifecycleControllerAttributes) and
+ V(SetSystemAttributes) are mutually exclusive commands when O(category)
+ is V(Manager).
type: list
elements: str
baseuri:
diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
index aece61664..90b355d13 100644
--- a/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
+++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
@@ -16,8 +16,6 @@ description:
- Builds Redfish URIs locally and sends them to remote iDRAC controllers to
get information back.
- For use with Dell EMC iDRAC operations that require Redfish OEM extensions.
- - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -35,7 +33,7 @@ options:
required: true
description:
- List of commands to execute on iDRAC.
- - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ - V(GetManagerAttributes) returns the list of dicts containing iDRAC,
LifecycleController and System attributes.
type: list
elements: str
diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py
index 0ec385e73..e0e28f855 100644
--- a/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py
+++ b/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py
@@ -84,7 +84,7 @@ ilo_redfish_command:
type: dict
contains:
ret:
- description: Return True/False based on whether the operation was performed succesfully.
+ description: Return True/False based on whether the operation was performed successfully.
type: bool
msg:
description: Status of the operation performed on the iLO.
diff --git a/ansible_collections/community/general/plugins/modules/imc_rest.py b/ansible_collections/community/general/plugins/modules/imc_rest.py
index 4bbaad23a..113d341e8 100644
--- a/ansible_collections/community/general/plugins/modules/imc_rest.py
+++ b/ansible_collections/community/general/plugins/modules/imc_rest.py
@@ -51,16 +51,16 @@ options:
description:
- Name of the absolute path of the filename that includes the body
of the http request being sent to the Cisco IMC REST API.
- - Parameter C(path) is mutual exclusive with parameter C(content).
+ - Parameter O(path) is mutually exclusive with parameter O(content).
aliases: [ 'src', 'config_file' ]
type: path
content:
description:
- - When used instead of C(path), sets the content of the API requests directly.
+ - When used instead of O(path), sets the content of the API requests directly.
- This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module.
- You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream,
the Cisco IMC output is subsequently merged.
- - Parameter C(content) is mutual exclusive with parameter C(path).
+ - Parameter O(content) is mutual exclusive with parameter O(path).
type: str
protocol:
description:
@@ -72,14 +72,14 @@ options:
description:
- The socket level timeout in seconds.
- This is the time that every single connection (every fragment) can spend.
- If this C(timeout) is reached, the module will fail with a
+ If this O(timeout) is reached, the module will fail with a
C(Connection failure) indicating that C(The read operation timed out).
default: 60
type: int
validate_certs:
description:
- - If C(false), SSL certificates will not be validated.
- - This should only set to C(false) used on personally controlled sites using self-signed certificates.
+ - If V(false), SSL certificates will not be validated.
+ - This should only set to V(false) used on personally controlled sites using self-signed certificates.
type: bool
default: true
notes:
@@ -88,7 +88,7 @@ notes:
- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
from the previous configuration. As a result, this module will always report a change on subsequent runs.
In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
-- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout)
+- If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout)
parameter. Some XML fragments can take longer than the default timeout.
- More information about the IMC REST API is available from
U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
@@ -100,7 +100,7 @@ EXAMPLES = r'''
hostname: '{{ imc_hostname }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
content: |
<configConfMo><inConfig>
<computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
@@ -112,7 +112,7 @@ EXAMPLES = r'''
hostname: '{{ imc_hostname }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
timeout: 120
content: |
<!-- Configure Serial-on-LAN -->
@@ -137,7 +137,7 @@ EXAMPLES = r'''
hostname: '{{ imc_hostname }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
content: |
<!-- Configure PXE boot -->
<configConfMo><inConfig>
@@ -155,7 +155,7 @@ EXAMPLES = r'''
hostname: '{{ imc_host }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
content: |
<configConfMo><inConfig>
<lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
@@ -167,7 +167,7 @@ EXAMPLES = r'''
hostname: '{{ imc_host }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
content: |
<configConfMo><inConfig>
<computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
@@ -179,7 +179,7 @@ EXAMPLES = r'''
hostname: '{{ imc_host }}'
username: '{{ imc_username }}'
password: '{{ imc_password }}'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
timeout: 120
content: |
<configConfMo><inConfig>
diff --git a/ansible_collections/community/general/plugins/modules/imgadm.py b/ansible_collections/community/general/plugins/modules/imgadm.py
index 6e4b81098..a247547fc 100644
--- a/ansible_collections/community/general/plugins/modules/imgadm.py
+++ b/ansible_collections/community/general/plugins/modules/imgadm.py
@@ -44,9 +44,9 @@ options:
required: true
choices: [ present, absent, deleted, imported, updated, vacuumed ]
description:
- - State the object operated on should be in. C(imported) is an alias for
- for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
- and C(uuid) to C(*), it will remove all unused images.
+ - State the object operated on should be in. V(imported) is an alias for
+ for V(present) and V(deleted) for V(absent). When set to V(vacuumed)
+ and O(uuid=*), it will remove all unused images.
type: str
type:
@@ -60,11 +60,8 @@ options:
uuid:
required: false
description:
- - Image UUID. Can either be a full UUID or C(*) for all images.
+ - Image UUID. Can either be a full UUID or V(*) for all images.
type: str
-
-requirements:
- - python >= 2.6
'''
EXAMPLES = '''
@@ -142,7 +139,7 @@ class Imgadm(object):
self.uuid = module.params['uuid']
# Since there are a number of (natural) aliases, prevent having to look
- # them up everytime we operate on `state`.
+ # them up every time we operate on `state`.
if self.params['state'] in ['present', 'imported', 'updated']:
self.present = True
else:
@@ -174,7 +171,7 @@ class Imgadm(object):
# There is no feedback from imgadm(1M) to determine if anything
# was actually changed. So treat this as an 'always-changes' operation.
- # Note that 'imgadm -v' produces unparseable JSON...
+ # Note that 'imgadm -v' produces unparsable JSON...
self.changed = True
def manage_sources(self):
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_database.py b/ansible_collections/community/general/plugins/modules/influxdb_database.py
index 046b16e18..a12326da5 100644
--- a/ansible_collections/community/general/plugins/modules/influxdb_database.py
+++ b/ansible_collections/community/general/plugins/modules/influxdb_database.py
@@ -17,7 +17,6 @@ description:
- Manage InfluxDB databases.
author: "Kamil Szczygiel (@kamsz)"
requirements:
- - "python >= 2.6"
- "influxdb >= 0.9"
- requests
attributes:
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_query.py b/ansible_collections/community/general/plugins/modules/influxdb_query.py
index c2e3d8acc..fda98d184 100644
--- a/ansible_collections/community/general/plugins/modules/influxdb_query.py
+++ b/ansible_collections/community/general/plugins/modules/influxdb_query.py
@@ -16,7 +16,6 @@ description:
- Query data points from InfluxDB.
author: "René Moser (@resmo)"
requirements:
- - "python >= 2.6"
- "influxdb >= 0.9"
attributes:
check_mode:
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py b/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
index 28d5450ff..f1c13a811 100644
--- a/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
+++ b/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
@@ -17,7 +17,6 @@ description:
- Manage InfluxDB retention policies.
author: "Kamil Szczygiel (@kamsz)"
requirements:
- - "python >= 2.6"
- "influxdb >= 0.9"
- requests
attributes:
@@ -46,14 +45,14 @@ options:
duration:
description:
- Determines how long InfluxDB should keep the data. If specified, it
- should be C(INF) or at least one hour. If not specified, C(INF) is
+ should be V(INF) or at least one hour. If not specified, V(INF) is
assumed. Supports complex duration expressions with multiple units.
- - Required only if I(state) is set to C(present).
+ - Required only if O(state) is set to V(present).
type: str
replication:
description:
- Determines how many independent copies of each point are stored in the cluster.
- - Required only if I(state) is set to C(present).
+ - Required only if O(state) is set to V(present).
type: int
default:
description:
@@ -115,7 +114,6 @@ EXAMPLES = r'''
duration: INF
replication: 1
ssl: false
- validate_certs: false
shard_group_duration: 1w
state: present
@@ -127,7 +125,6 @@ EXAMPLES = r'''
duration: 5d1h30m
replication: 1
ssl: false
- validate_certs: false
shard_group_duration: 1d10h30m
state: present
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_user.py b/ansible_collections/community/general/plugins/modules/influxdb_user.py
index bbd0f8f5a..ca4201db1 100644
--- a/ansible_collections/community/general/plugins/modules/influxdb_user.py
+++ b/ansible_collections/community/general/plugins/modules/influxdb_user.py
@@ -18,7 +18,6 @@ description:
- Manage InfluxDB users.
author: "Vitaliy Zhhuta (@zhhuta)"
requirements:
- - "python >= 2.6"
- "influxdb >= 0.9"
attributes:
check_mode:
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_write.py b/ansible_collections/community/general/plugins/modules/influxdb_write.py
index f95b6dae8..76e6449bb 100644
--- a/ansible_collections/community/general/plugins/modules/influxdb_write.py
+++ b/ansible_collections/community/general/plugins/modules/influxdb_write.py
@@ -16,7 +16,6 @@ description:
- Write data points into InfluxDB.
author: "René Moser (@resmo)"
requirements:
- - "python >= 2.6"
- "influxdb >= 0.9"
attributes:
check_mode:
diff --git a/ansible_collections/community/general/plugins/modules/ini_file.py b/ansible_collections/community/general/plugins/modules/ini_file.py
index 874f10ae0..ec71a9473 100644
--- a/ansible_collections/community/general/plugins/modules/ini_file.py
+++ b/ansible_collections/community/general/plugins/modules/ini_file.py
@@ -4,6 +4,7 @@
# Copyright (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# Copyright (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
# Copyright (c) 2017, Ansible Project
+# Copyright (c) 2023, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -22,8 +23,7 @@ description:
- Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
- Adds missing sections if they don't exist.
- - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
- - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
+ - This module adds missing ending newlines to files to keep in line with the POSIX standard, even when
no other modifications need to be applied.
attributes:
check_mode:
@@ -34,35 +34,34 @@ options:
path:
description:
- Path to the INI-style file; this file is created if required.
- - Before Ansible 2.3 this option was only usable as I(dest).
type: path
required: true
aliases: [ dest ]
section:
description:
- - Section name in INI file. This is added if I(state=present) automatically when
+ - Section name in INI file. This is added if O(state=present) automatically when
a single value is being set.
- - If left empty, being omitted, or being set to C(null), the I(option) will be placed before the first I(section).
- - Using C(null) is also required if the config format does not support sections.
+ - If being omitted, the O(option) will be placed before the first O(section).
+ - Omitting O(section) is also required if the config format does not support sections.
type: str
option:
description:
- - If set (required for changing a I(value)), this is the name of the option.
- - May be omitted if adding/removing a whole I(section).
+ - If set (required for changing a O(value)), this is the name of the option.
+ - May be omitted if adding/removing a whole O(section).
type: str
value:
description:
- - The string value to be associated with an I(option).
- - May be omitted when removing an I(option).
- - Mutually exclusive with I(values).
- - I(value=v) is equivalent to I(values=[v]).
+ - The string value to be associated with an O(option).
+ - May be omitted when removing an O(option).
+ - Mutually exclusive with O(values).
+ - O(value=v) is equivalent to O(values=[v]).
type: str
values:
description:
- - The string value to be associated with an I(option).
- - May be omitted when removing an I(option).
- - Mutually exclusive with I(value).
- - I(value=v) is equivalent to I(values=[v]).
+ - The string value to be associated with an O(option).
+ - May be omitted when removing an O(option).
+ - Mutually exclusive with O(value).
+ - O(value=v) is equivalent to O(values=[v]).
type: list
elements: str
version_added: 3.6.0
@@ -74,22 +73,22 @@ options:
default: false
state:
description:
- - If set to C(absent) and I(exclusive) set to C(true) all matching I(option) lines are removed.
- - If set to C(absent) and I(exclusive) set to C(false) the specified I(option=value) lines are removed,
- but the other I(option)s with the same name are not touched.
- - If set to C(present) and I(exclusive) set to C(false) the specified I(option=values) lines are added,
- but the other I(option)s with the same name are not touched.
- - If set to C(present) and I(exclusive) set to C(true) all given I(option=values) lines will be
- added and the other I(option)s with the same name are removed.
+ - If set to V(absent) and O(exclusive) set to V(true) all matching O(option) lines are removed.
+ - If set to V(absent) and O(exclusive) set to V(false) the specified O(option=value) lines are removed,
+ but the other O(option)s with the same name are not touched.
+ - If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added,
+ but the other O(option)s with the same name are not touched.
+ - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines will be
+ added and the other O(option)s with the same name are removed.
type: str
choices: [ absent, present ]
default: present
exclusive:
description:
- - If set to C(true) (default), all matching I(option) lines are removed when I(state=absent),
- or replaced when I(state=present).
- - If set to C(false), only the specified I(value(s)) are added when I(state=present),
- or removed when I(state=absent), and existing ones are not modified.
+ - If set to V(true) (default), all matching O(option) lines are removed when O(state=absent),
+ or replaced when O(state=present).
+ - If set to V(false), only the specified O(value)/O(values) are added when O(state=present),
+ or removed when O(state=absent), and existing ones are not modified.
type: bool
default: true
version_added: 3.6.0
@@ -98,9 +97,15 @@ options:
- Do not insert spaces before and after '=' symbol.
type: bool
default: false
+ ignore_spaces:
+ description:
+ - Do not change a line if doing so would only add or remove spaces before or after the V(=) symbol.
+ type: bool
+ default: false
+ version_added: 7.5.0
create:
description:
- - If set to C(false), the module will fail if the file does not already exist.
+ - If set to V(false), the module will fail if the file does not already exist.
- By default it will create the file if it is missing.
type: bool
default: true
@@ -109,9 +114,23 @@ options:
- Allow option without value and without '=' symbol.
type: bool
default: false
+ modify_inactive_option:
+ description:
+ - By default the module replaces a commented line that matches the given option.
+ - Set this option to V(false) to avoid this. This is useful when you want to keep commented example
+ C(key=value) pairs for documentation purposes.
+ type: bool
+ default: true
+ version_added: 8.0.0
+ follow:
+ description:
+ - This flag indicates that filesystem links, if they exist, should be followed.
+ - O(follow=true) can modify O(path) when combined with parameters such as O(mode).
+ type: bool
+ default: false
+ version_added: 7.1.0
notes:
- - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
- - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - While it is possible to add an O(option) without specifying a O(value), this makes no sense.
- As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files.
author:
- Jan-Piet Mens (@jpmens)
@@ -119,7 +138,6 @@ author:
'''
EXAMPLES = r'''
-# Before Ansible 2.3, option 'dest' was used instead of 'path'
- name: Ensure "fav=lemonade is in section "[drinks]" in specified file
community.general.ini_file:
path: /etc/conf
@@ -157,6 +175,13 @@ EXAMPLES = r'''
- pepsi
mode: '0600'
state: present
+
+- name: Add "beverage=lemon juice" outside a section in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ option: beverage
+ value: lemon juice
+ state: present
'''
import io
@@ -171,27 +196,35 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text
def match_opt(option, line):
option = re.escape(option)
- return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+ return re.match('([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
def match_active_opt(option, line):
option = re.escape(option)
- return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
-
-
-def update_section_line(changed, section_lines, index, changed_lines, newline, msg):
- option_changed = section_lines[index] != newline
+ return re.match('()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+
+
+def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg):
+ option_changed = None
+ if ignore_spaces:
+ old_match = match_opt(option, section_lines[index])
+ if not old_match.group(1):
+ new_match = match_opt(option, newline)
+ option_changed = old_match.group(7) != new_match.group(7)
+ if option_changed is None:
+ option_changed = section_lines[index] != newline
+ if option_changed:
+ section_lines[index] = newline
changed = changed or option_changed
if option_changed:
msg = 'option changed'
- section_lines[index] = newline
changed_lines[index] = 1
return (changed, msg)
def do_ini(module, filename, section=None, option=None, values=None,
state='present', exclusive=True, backup=False, no_extra_spaces=False,
- create=True, allow_no_value=False):
+ ignore_spaces=False, create=True, allow_no_value=False, modify_inactive_option=True, follow=False):
if section is not None:
section = to_text(section)
@@ -210,15 +243,20 @@ def do_ini(module, filename, section=None, option=None, values=None,
after_header='%s (content)' % filename,
)
- if not os.path.exists(filename):
+ if follow and os.path.islink(filename):
+ target_filename = os.path.realpath(filename)
+ else:
+ target_filename = filename
+
+ if not os.path.exists(target_filename):
if not create:
- module.fail_json(rc=257, msg='Destination %s does not exist!' % filename)
- destpath = os.path.dirname(filename)
+ module.fail_json(rc=257, msg='Destination %s does not exist!' % target_filename)
+ destpath = os.path.dirname(target_filename)
if not os.path.exists(destpath) and not module.check_mode:
os.makedirs(destpath)
ini_lines = []
else:
- with io.open(filename, 'r', encoding="utf-8-sig") as ini_file:
+ with io.open(target_filename, 'r', encoding="utf-8-sig") as ini_file:
ini_lines = [to_text(line) for line in ini_file.readlines()]
if module._diff:
@@ -266,9 +304,11 @@ def do_ini(module, filename, section=None, option=None, values=None,
before = after = []
section_lines = []
+ section_pattern = re.compile(to_text(r'^\[\s*%s\s*]' % re.escape(section.strip())))
+
for index, line in enumerate(ini_lines):
# find start and end of section
- if line.startswith(u'[%s]' % section):
+ if section_pattern.match(line):
within_section = True
section_start = index
elif line.startswith(u'['):
@@ -283,6 +323,12 @@ def do_ini(module, filename, section=None, option=None, values=None,
# Keep track of changed section_lines
changed_lines = [0] * len(section_lines)
+ # Determine whether to consider using commented out/inactive options or only active ones
+ if modify_inactive_option:
+ match_function = match_opt
+ else:
+ match_function = match_active_opt
+
# handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex
#
# 1. edit all lines where we have a option=value pair with a matching value in values[]
@@ -292,10 +338,10 @@ def do_ini(module, filename, section=None, option=None, values=None,
if state == 'present' and option:
for index, line in enumerate(section_lines):
- if match_opt(option, line):
- match = match_opt(option, line)
- if values and match.group(6) in values:
- matched_value = match.group(6)
+ if match_function(option, line):
+ match = match_function(option, line)
+ if values and match.group(7) in values:
+ matched_value = match.group(7)
if not matched_value and allow_no_value:
# replace existing option with no value line(s)
newline = u'%s\n' % option
@@ -303,12 +349,12 @@ def do_ini(module, filename, section=None, option=None, values=None,
else:
# replace existing option=value line(s)
newline = assignment_format % (option, matched_value)
- (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+ (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg)
values.remove(matched_value)
elif not values and allow_no_value:
# replace existing option with no value line(s)
newline = u'%s\n' % option
- (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+ (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg)
option_no_value_present = True
break
@@ -316,14 +362,14 @@ def do_ini(module, filename, section=None, option=None, values=None,
# override option with no value to option with value if not allow_no_value
if len(values) > 0:
for index, line in enumerate(section_lines):
- if not changed_lines[index] and match_opt(option, line):
+ if not changed_lines[index] and match_function(option, line):
newline = assignment_format % (option, values.pop(0))
- (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+ (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg)
if len(values) == 0:
break
# remove all remaining option occurrences from the rest of the section
for index in range(len(section_lines) - 1, 0, -1):
- if not changed_lines[index] and match_opt(option, section_lines[index]):
+ if not changed_lines[index] and match_function(option, section_lines[index]):
del section_lines[index]
del changed_lines[index]
changed = True
@@ -367,7 +413,7 @@ def do_ini(module, filename, section=None, option=None, values=None,
section_lines = new_section_lines
elif not exclusive and len(values) > 0:
# delete specified option=value line(s)
- new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)]
+ new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(7) in values)]
if section_lines != new_section_lines:
changed = True
msg = 'option changed'
@@ -404,7 +450,7 @@ def do_ini(module, filename, section=None, option=None, values=None,
backup_file = None
if changed and not module.check_mode:
if backup:
- backup_file = module.backup_local(filename)
+ backup_file = module.backup_local(target_filename)
encoded_ini_lines = [to_bytes(line) for line in ini_lines]
try:
@@ -416,10 +462,10 @@ def do_ini(module, filename, section=None, option=None, values=None,
module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc())
try:
- module.atomic_move(tmpfile, filename)
+ module.atomic_move(tmpfile, target_filename)
except IOError:
module.ansible.fail_json(msg='Unable to move temporary \
- file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc())
+ file %s to %s, IOError' % (tmpfile, target_filename), traceback=traceback.format_exc())
return (changed, backup_file, diff, msg)
@@ -437,8 +483,11 @@ def main():
state=dict(type='str', default='present', choices=['absent', 'present']),
exclusive=dict(type='bool', default=True),
no_extra_spaces=dict(type='bool', default=False),
+ ignore_spaces=dict(type='bool', default=False),
allow_no_value=dict(type='bool', default=False),
- create=dict(type='bool', default=True)
+ modify_inactive_option=dict(type='bool', default=True),
+ create=dict(type='bool', default=True),
+ follow=dict(type='bool', default=False)
),
mutually_exclusive=[
['value', 'values']
@@ -456,8 +505,11 @@ def main():
exclusive = module.params['exclusive']
backup = module.params['backup']
no_extra_spaces = module.params['no_extra_spaces']
+ ignore_spaces = module.params['ignore_spaces']
allow_no_value = module.params['allow_no_value']
+ modify_inactive_option = module.params['modify_inactive_option']
create = module.params['create']
+ follow = module.params['follow']
if state == 'present' and not allow_no_value and value is None and not values:
module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.")
@@ -467,7 +519,9 @@ def main():
elif values is None:
values = []
- (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value)
+ (changed, backup_file, diff, msg) = do_ini(
+ module, path, section, option, values, state, exclusive, backup,
+ no_extra_spaces, ignore_spaces, create, allow_no_value, modify_inactive_option, follow)
if not module.check_mode and os.path.exists(path):
file_args = module.load_file_common_arguments(module.params)
diff --git a/ansible_collections/community/general/plugins/modules/installp.py b/ansible_collections/community/general/plugins/modules/installp.py
index 41064363d..4b5a6949c 100644
--- a/ansible_collections/community/general/plugins/modules/installp.py
+++ b/ansible_collections/community/general/plugins/modules/installp.py
@@ -32,7 +32,7 @@ options:
name:
description:
- One or more packages to install or remove.
- - Use C(all) to install all packages available on informed C(repository_path).
+ - Use V(all) to install all packages available on informed O(repository_path).
type: list
elements: str
required: true
@@ -133,7 +133,7 @@ def _check_new_pkg(module, package, repository_path):
def _check_installed_pkg(module, package, repository_path):
"""
Check the package on AIX.
- It verifies if the package is installed and informations
+ It verifies if the package is installed and information
:param module: Ansible module parameters spec.
:param package: Package/fileset name.
diff --git a/ansible_collections/community/general/plugins/modules/interfaces_file.py b/ansible_collections/community/general/plugins/modules/interfaces_file.py
index f19c019f4..98103082e 100644
--- a/ansible_collections/community/general/plugins/modules/interfaces_file.py
+++ b/ansible_collections/community/general/plugins/modules/interfaces_file.py
@@ -12,14 +12,14 @@ __metaclass__ = type
DOCUMENTATION = '''
---
module: interfaces_file
-short_description: Tweak settings in /etc/network/interfaces files
+short_description: Tweak settings in C(/etc/network/interfaces) files
extends_documentation_fragment:
- ansible.builtin.files
- community.general.attributes
description:
- Manage (add, remove, change) individual interface options in an interfaces-style file without having
to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file.
- - Read information about interfaces from interfaces-styled files
+ - Read information about interfaces from interfaces-styled files.
attributes:
check_mode:
support: full
@@ -29,27 +29,27 @@ options:
dest:
type: path
description:
- - Path to the interfaces file
+ - Path to the interfaces file.
default: /etc/network/interfaces
iface:
type: str
description:
- - Name of the interface, required for value changes or option remove
+ - Name of the interface, required for value changes or option remove.
address_family:
type: str
description:
- - Address family of the interface, useful if same interface name is used for both inet and inet6
+ - Address family of the interface, useful if same interface name is used for both V(inet) and V(inet6).
option:
type: str
description:
- - Name of the option, required for value changes or option remove
+ - Name of the option, required for value changes or option remove.
value:
type: str
description:
- - If I(option) is not presented for the I(interface) and I(state) is C(present) option will be added.
- If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), it's value will be updated.
- C(pre-up), C(up), C(post-up) and C(down) options can't be updated, only adding new options, removing existing
- ones or cleaning the whole option set are supported
+ - If O(option) is not presented for the O(iface) and O(state) is V(present) option will be added.
+ If O(option) already exists and is not V(pre-up), V(up), V(post-up) or V(down), it's value will be updated.
+ V(pre-up), V(up), V(post-up) and V(down) options cannot be updated, only adding new options, removing existing
+ ones or cleaning the whole option set are supported.
backup:
description:
- Create a backup file including the timestamp information so you can get
@@ -59,77 +59,81 @@ options:
state:
type: str
description:
- - If set to C(absent) the option or section will be removed if present instead of created.
+ - If set to V(absent) the option or section will be removed if present instead of created.
default: "present"
choices: [ "present", "absent" ]
notes:
- - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state
+ - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state.
requirements: []
author: "Roman Belyakovsky (@hryamzik)"
'''
RETURN = '''
dest:
- description: destination file/path
+ description: Destination file/path.
returned: success
type: str
sample: "/etc/network/interfaces"
ifaces:
- description: interfaces dictionary
+ description: Interfaces dictionary.
returned: success
- type: complex
+ type: dict
contains:
ifaces:
- description: interface dictionary
+ description: Interface dictionary.
returned: success
type: dict
contains:
eth0:
- description: Name of the interface
+ description: Name of the interface.
returned: success
type: dict
contains:
address_family:
- description: interface address family
+ description: Interface address family.
returned: success
type: str
sample: "inet"
method:
- description: interface method
+ description: Interface method.
returned: success
type: str
sample: "manual"
mtu:
- description: other options, all values returned as strings
+ description: Other options, all values returned as strings.
returned: success
type: str
sample: "1500"
pre-up:
- description: list of C(pre-up) scripts
+ description: List of C(pre-up) scripts.
returned: success
type: list
+ elements: str
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
up:
- description: list of C(up) scripts
+ description: List of C(up) scripts.
returned: success
type: list
+ elements: str
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
post-up:
- description: list of C(post-up) scripts
+ description: List of C(post-up) scripts.
returned: success
type: list
+ elements: str
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
down:
- description: list of C(down) scripts
+ description: List of C(down) scripts.
returned: success
type: list
+ elements: str
sample:
- "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
@@ -336,6 +340,8 @@ def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_option
changed = False
for ln in lines:
if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
+ if address_family is not None and ln.get('address_family') != address_family:
+ continue
changed = True
ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
ln['params']['method'] = value
diff --git a/ansible_collections/community/general/plugins/modules/ipa_config.py b/ansible_collections/community/general/plugins/modules/ipa_config.py
index ec94b58d4..871643fd7 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_config.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_config.py
@@ -40,6 +40,12 @@ options:
aliases: ["primarygroup"]
type: str
version_added: '2.5.0'
+ ipagroupobjectclasses:
+ description: A list of group objectclasses.
+ aliases: ["groupobjectclasses"]
+ type: list
+ elements: str
+ version_added: '7.3.0'
ipagroupsearchfields:
description: A list of fields to search in when searching for groups.
aliases: ["groupsearchfields"]
@@ -85,12 +91,21 @@ options:
elements: str
version_added: '3.7.0'
ipauserauthtype:
- description: The authentication type to use by default.
+ description:
+ - The authentication type to use by default.
+ - The choice V(idp) has been added in community.general 7.3.0.
+ - The choice V(passkey) has been added in community.general 8.1.0.
aliases: ["userauthtype"]
- choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"]
+ choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey", "disabled"]
type: list
elements: str
version_added: '2.5.0'
+ ipauserobjectclasses:
+ description: A list of user objectclasses.
+ aliases: ["userobjectclasses"]
+ type: list
+ elements: str
+ version_added: '7.3.0'
ipausersearchfields:
description: A list of fields to search in when searching for users.
aliases: ["usersearchfields"]
@@ -235,11 +250,12 @@ class ConfigIPAClient(IPAClient):
def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
ipadefaultemaildomain=None, ipadefaultprimarygroup=None,
- ipagroupsearchfields=None, ipahomesrootdir=None,
- ipakrbauthzdata=None, ipamaxusernamelength=None,
- ipapwdexpadvnotify=None, ipasearchrecordslimit=None,
- ipasearchtimelimit=None, ipaselinuxusermaporder=None,
- ipauserauthtype=None, ipausersearchfields=None):
+ ipagroupsearchfields=None, ipagroupobjectclasses=None,
+ ipahomesrootdir=None, ipakrbauthzdata=None,
+ ipamaxusernamelength=None, ipapwdexpadvnotify=None,
+ ipasearchrecordslimit=None, ipasearchtimelimit=None,
+ ipaselinuxusermaporder=None, ipauserauthtype=None,
+ ipausersearchfields=None, ipauserobjectclasses=None):
config = {}
if ipaconfigstring is not None:
config['ipaconfigstring'] = ipaconfigstring
@@ -249,6 +265,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
config['ipadefaultemaildomain'] = ipadefaultemaildomain
if ipadefaultprimarygroup is not None:
config['ipadefaultprimarygroup'] = ipadefaultprimarygroup
+ if ipagroupobjectclasses is not None:
+ config['ipagroupobjectclasses'] = ipagroupobjectclasses
if ipagroupsearchfields is not None:
config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields)
if ipahomesrootdir is not None:
@@ -267,6 +285,8 @@ def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder)
if ipauserauthtype is not None:
config['ipauserauthtype'] = ipauserauthtype
+ if ipauserobjectclasses is not None:
+ config['ipauserobjectclasses'] = ipauserobjectclasses
if ipausersearchfields is not None:
config['ipausersearchfields'] = ','.join(ipausersearchfields)
@@ -283,6 +303,7 @@ def ensure(module, client):
ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'),
+ ipagroupobjectclasses=module.params.get('ipagroupobjectclasses'),
ipagroupsearchfields=module.params.get('ipagroupsearchfields'),
ipahomesrootdir=module.params.get('ipahomesrootdir'),
ipakrbauthzdata=module.params.get('ipakrbauthzdata'),
@@ -293,6 +314,7 @@ def ensure(module, client):
ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'),
ipauserauthtype=module.params.get('ipauserauthtype'),
ipausersearchfields=module.params.get('ipausersearchfields'),
+ ipauserobjectclasses=module.params.get('ipauserobjectclasses'),
)
ipa_config = client.config_show()
diff = get_config_diff(client, ipa_config, module_config)
@@ -322,6 +344,8 @@ def main():
ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']),
+ ipagroupobjectclasses=dict(type='list', elements='str',
+ aliases=['groupobjectclasses']),
ipagroupsearchfields=dict(type='list', elements='str',
aliases=['groupsearchfields']),
ipahomesrootdir=dict(type='str', aliases=['homesrootdir']),
@@ -337,9 +361,11 @@ def main():
ipauserauthtype=dict(type='list', elements='str',
aliases=['userauthtype'],
choices=["password", "radius", "otp", "pkinit",
- "hardened", "disabled"]),
+ "hardened", "idp", "passkey", "disabled"]),
ipausersearchfields=dict(type='list', elements='str',
aliases=['usersearchfields']),
+ ipauserobjectclasses=dict(type='list', elements='str',
+ aliases=['userobjectclasses']),
)
module = AnsibleModule(
diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py b/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
index b1a90141b..cb4ce03dd 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
@@ -35,22 +35,24 @@ options:
record_type:
description:
- The type of DNS record name.
- - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported.
+ - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV' and 'MX' are supported.
- "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5."
- "'SRV' and 'MX' are added in version 2.8."
+ - "'NS' are added in comunity.general 8.2.0."
required: false
default: 'A'
- choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT']
+ choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'NS', 'PTR', 'SRV', 'TXT']
type: str
record_value:
description:
- Manage DNS record name with this value.
- - Mutually exclusive with I(record_values), and exactly one of I(record_value) and I(record_values) has to be specified.
- - Use I(record_values) if you need to specify multiple values.
+ - Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified.
+ - Use O(record_values) if you need to specify multiple values.
- In the case of 'A' or 'AAAA' record types, this will be the IP address.
- In the case of 'A6' record type, this will be the A6 Record data.
- In the case of 'CNAME' record type, this will be the hostname.
- In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'NS' record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record.
- In the case of 'PTR' record type, this will be the hostname.
- In the case of 'TXT' record type, this will be a text.
- In the case of 'SRV' record type, this will be a service record.
@@ -59,11 +61,12 @@ options:
record_values:
description:
- Manage DNS record name with this value.
- - Mutually exclusive with I(record_value), and exactly one of I(record_value) and I(record_values) has to be specified.
+ - Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified.
- In the case of 'A' or 'AAAA' record types, this will be the IP address.
- In the case of 'A6' record type, this will be the A6 Record data.
- In the case of 'CNAME' record type, this will be the hostname.
- In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'NS' record type, this will be the name server hostname. Hostname must already have a valid A or AAAA record.
- In the case of 'PTR' record type, this will be the hostname.
- In the case of 'TXT' record type, this will be a text.
- In the case of 'SRV' record type, this will be a service record.
@@ -73,7 +76,7 @@ options:
record_ttl:
description:
- Set the TTL for the record.
- - Applies only when adding a new or changing the value of I(record_value) or I(record_values).
+ - Applies only when adding a new or changing the value of O(record_value) or O(record_values).
required: false
type: int
state:
@@ -162,6 +165,16 @@ EXAMPLES = r'''
ipa_user: admin
ipa_pass: topsecret
state: absent
+
+- name: Ensure an NS record for a subdomain is present
+ community,general.ipa_dnsrecord:
+ name: subdomain
+ zone_name: example.com
+ record_type: 'NS'
+ record_value: 'ns1.subdomain.exmaple.com'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: ChangeMe!
'''
RETURN = r'''
@@ -205,6 +218,8 @@ class DNSRecordIPAClient(IPAClient):
item.update(cname_part_hostname=value)
elif details['record_type'] == 'DNAME':
item.update(dname_part_target=value)
+ elif details['record_type'] == 'NS':
+ item.update(ns_part_hostname=value)
elif details['record_type'] == 'PTR':
item.update(ptr_part_hostname=value)
elif details['record_type'] == 'TXT':
@@ -241,6 +256,8 @@ def get_dnsrecord_dict(details=None):
module_dnsrecord.update(cnamerecord=details['record_values'])
elif details['record_type'] == 'DNAME' and details['record_values']:
module_dnsrecord.update(dnamerecord=details['record_values'])
+ elif details['record_type'] == 'NS' and details['record_values']:
+ module_dnsrecord.update(nsrecord=details['record_values'])
elif details['record_type'] == 'PTR' and details['record_values']:
module_dnsrecord.update(ptrrecord=details['record_values'])
elif details['record_type'] == 'TXT' and details['record_values']:
@@ -311,7 +328,7 @@ def ensure(module, client):
def main():
- record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX']
+ record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'NS', 'PTR', 'TXT', 'SRV', 'MX']
argument_spec = ipa_argument_spec()
argument_spec.update(
zone_name=dict(type='str', required=True),
diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnszone.py b/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
index 06c93841e..6699b0525 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
@@ -152,7 +152,8 @@ def ensure(module, client):
changed = True
if not module.check_mode:
client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})
- elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper():
+ elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or \
+ ipa_dnszone.get('idnsallowsyncptr') and ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper():
changed = True
if not module.check_mode:
client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})
diff --git a/ansible_collections/community/general/plugins/modules/ipa_group.py b/ansible_collections/community/general/plugins/modules/ipa_group.py
index 87e7f0e66..92470606f 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_group.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_group.py
@@ -22,8 +22,8 @@ attributes:
options:
append:
description:
- - If C(true), add the listed I(user) and I(group) to the group members.
- - If C(false), only the listed I(user) and I(group) will be group members, removing any other members.
+ - If V(true), add the listed O(user) and O(group) to the group members.
+ - If V(false), only the listed O(user) and O(group) will be group members, removing any other members.
default: false
type: bool
version_added: 4.0.0
@@ -50,9 +50,9 @@ options:
group:
description:
- List of group names assigned to this group.
- - If I(append=false) and an empty list is passed all groups will be removed from this group.
+ - If O(append=false) and an empty list is passed all groups will be removed from this group.
- Groups that are already assigned but not passed will be removed.
- - If I(append=true) the listed groups will be assigned without removing other groups.
+ - If O(append=true) the listed groups will be assigned without removing other groups.
- If option is omitted assigned groups will not be checked or changed.
type: list
elements: str
@@ -63,20 +63,20 @@ options:
user:
description:
- List of user names assigned to this group.
- - If I(append=false) and an empty list is passed all users will be removed from this group.
+ - If O(append=false) and an empty list is passed all users will be removed from this group.
- Users that are already assigned but not passed will be removed.
- - If I(append=true) the listed users will be assigned without removing other users.
+ - If O(append=true) the listed users will be assigned without removing other users.
- If option is omitted assigned users will not be checked or changed.
type: list
elements: str
external_user:
description:
- List of external users assigned to this group.
- - Behaves identically to I(user) with respect to I(append) attribute.
- - List entries can be in C(DOMAIN\\username) or SID format.
+ - Behaves identically to O(user) with respect to O(append) attribute.
+ - List entries can be in V(DOMAIN\\\\username) or SID format.
- Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users.
This is because only SIDs are returned by IPA query.
- - I(external=true) is needed for this option to work.
+ - O(external=true) is needed for this option to work.
type: list
elements: str
version_added: 6.3.0
diff --git a/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py b/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
index b7633262b..77a4d0d48 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
@@ -161,6 +161,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
class HBACRuleIPAClient(IPAClient):
@@ -231,10 +232,17 @@ def ensure(module, client):
name = module.params['cn']
state = module.params['state']
+ ipa_version = client.get_ipa_version()
if state in ['present', 'enabled']:
- ipaenabledflag = 'TRUE'
+ if LooseVersion(ipa_version) < LooseVersion('4.9.10'):
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = True
else:
- ipaenabledflag = 'FALSE'
+ if LooseVersion(ipa_version) < LooseVersion('4.9.10'):
+ ipaenabledflag = 'FALSE'
+ else:
+ ipaenabledflag = False
host = module.params['host']
hostcategory = module.params['hostcategory']
diff --git a/ansible_collections/community/general/plugins/modules/ipa_host.py b/ansible_collections/community/general/plugins/modules/ipa_host.py
index d561401d4..b37a606d7 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_host.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_host.py
@@ -80,7 +80,7 @@ options:
type: str
update_dns:
description:
- - If set C("True") with state as C("absent"), then removes DNS records of the host managed by FreeIPA DNS.
+ - If set V(true) with O(state=absent), then removes DNS records of the host managed by FreeIPA DNS.
- This option has no effect for states other than "absent".
type: bool
random_password:
@@ -118,7 +118,6 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
- validate_certs: false
random_password: true
- name: Ensure host is disabled
diff --git a/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py b/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
index 12232de89..70749c35b 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
@@ -22,8 +22,8 @@ attributes:
options:
append:
description:
- - If C(true), add the listed I(host) to the I(hostgroup).
- - If C(false), only the listed I(host) will be in I(hostgroup), removing any other hosts.
+ - If V(true), add the listed O(host) to the O(hostgroup).
+ - If V(false), only the listed O(host) will be in O(hostgroup), removing any other hosts.
default: false
type: bool
version_added: 6.6.0
diff --git a/ansible_collections/community/general/plugins/modules/ipa_otptoken.py b/ansible_collections/community/general/plugins/modules/ipa_otptoken.py
index f25ab6023..567674f93 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_otptoken.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_otptoken.py
@@ -48,7 +48,7 @@ options:
description: Assigned user of the token.
type: str
enabled:
- description: Mark the token as enabled (default C(true)).
+ description: Mark the token as enabled (default V(true)).
default: true
type: bool
notbefore:
@@ -237,7 +237,7 @@ def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=N
if owner is not None:
otptoken[ansible_to_ipa['owner']] = owner
if enabled is not None:
- otptoken[ansible_to_ipa['enabled']] = 'FALSE' if enabled else 'TRUE'
+ otptoken[ansible_to_ipa['enabled']] = False if enabled else True
if notbefore is not None:
otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z'
if notafter is not None:
diff --git a/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py b/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py
index 6a6c4318b..ba7d70291 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py
@@ -64,6 +64,26 @@ options:
lockouttime:
description: Period (in seconds) for which users are locked out.
type: str
+ gracelimit:
+ description: Maximum number of LDAP logins after password expiration.
+ type: int
+ version_added: 8.2.0
+ maxrepeat:
+ description: Maximum number of allowed same consecutive characters in the new password.
+ type: int
+ version_added: 8.2.0
+ maxsequence:
+ description: Maximum length of monotonic character sequences in the new password. An example of a monotonic sequence of length 5 is V(12345).
+ type: int
+ version_added: 8.2.0
+ dictcheck:
+ description: Check whether the password (with possible modifications) matches a word in a dictionary (using cracklib).
+ type: bool
+ version_added: 8.2.0
+ usercheck:
+ description: Check whether the password (with possible modifications) contains the user name in some form (if the name has > 3 characters).
+ type: bool
+ version_added: 8.2.0
extends_documentation_fragment:
- community.general.ipa.documentation
- community.general.attributes
@@ -93,9 +113,15 @@ EXAMPLES = r'''
historylength: '16'
minclasses: '4'
priority: '10'
+ minlength: '6'
maxfailcount: '4'
failinterval: '600'
lockouttime: '1200'
+ gracelimit: 3
+ maxrepeat: 3
+ maxsequence: 3
+ dictcheck: true
+ usercheck: true
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
@@ -159,26 +185,35 @@ class PwPolicyIPAClient(IPAClient):
def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None,
minlength=None, priority=None, maxfailcount=None, failinterval=None,
- lockouttime=None):
+ lockouttime=None, gracelimit=None, maxrepeat=None, maxsequence=None, dictcheck=None, usercheck=None):
pwpolicy = {}
- if maxpwdlife is not None:
- pwpolicy['krbmaxpwdlife'] = maxpwdlife
- if minpwdlife is not None:
- pwpolicy['krbminpwdlife'] = minpwdlife
- if historylength is not None:
- pwpolicy['krbpwdhistorylength'] = historylength
- if minclasses is not None:
- pwpolicy['krbpwdmindiffchars'] = minclasses
- if minlength is not None:
- pwpolicy['krbpwdminlength'] = minlength
- if priority is not None:
- pwpolicy['cospriority'] = priority
- if maxfailcount is not None:
- pwpolicy['krbpwdmaxfailure'] = maxfailcount
- if failinterval is not None:
- pwpolicy['krbpwdfailurecountinterval'] = failinterval
- if lockouttime is not None:
- pwpolicy['krbpwdlockoutduration'] = lockouttime
+ pwpolicy_options = {
+ 'krbmaxpwdlife': maxpwdlife,
+ 'krbminpwdlife': minpwdlife,
+ 'krbpwdhistorylength': historylength,
+ 'krbpwdmindiffchars': minclasses,
+ 'krbpwdminlength': minlength,
+ 'cospriority': priority,
+ 'krbpwdmaxfailure': maxfailcount,
+ 'krbpwdfailurecountinterval': failinterval,
+ 'krbpwdlockoutduration': lockouttime,
+ 'passwordgracelimit': gracelimit,
+ 'ipapwdmaxrepeat': maxrepeat,
+ 'ipapwdmaxsequence': maxsequence,
+ }
+
+ pwpolicy_boolean_options = {
+ 'ipapwddictcheck': dictcheck,
+ 'ipapwdusercheck': usercheck,
+ }
+
+ for option, value in pwpolicy_options.items():
+ if value is not None:
+ pwpolicy[option] = to_native(value)
+
+ for option, value in pwpolicy_boolean_options.items():
+ if value is not None:
+ pwpolicy[option] = bool(value)
return pwpolicy
@@ -199,7 +234,13 @@ def ensure(module, client):
priority=module.params.get('priority'),
maxfailcount=module.params.get('maxfailcount'),
failinterval=module.params.get('failinterval'),
- lockouttime=module.params.get('lockouttime'))
+ lockouttime=module.params.get('lockouttime'),
+ gracelimit=module.params.get('gracelimit'),
+ maxrepeat=module.params.get('maxrepeat'),
+ maxsequence=module.params.get('maxsequence'),
+ dictcheck=module.params.get('dictcheck'),
+ usercheck=module.params.get('usercheck'),
+ )
ipa_pwpolicy = client.pwpolicy_find(name=name)
@@ -236,7 +277,13 @@ def main():
priority=dict(type='str'),
maxfailcount=dict(type='str'),
failinterval=dict(type='str'),
- lockouttime=dict(type='str'))
+ lockouttime=dict(type='str'),
+ gracelimit=dict(type='int'),
+ maxrepeat=dict(type='int'),
+ maxsequence=dict(type='int'),
+ dictcheck=dict(type='bool'),
+ usercheck=dict(type='bool'),
+ )
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudorule.py b/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
index 59b4eb19e..223f6b6de 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
@@ -47,6 +47,22 @@ options:
type: list
elements: str
version_added: 2.0.0
+ deny_cmd:
+ description:
+ - List of denied commands assigned to the rule.
+ - If an empty list is passed all commands will be removed from the rule.
+ - If option is omitted commands will not be checked or changed.
+ type: list
+ elements: str
+ version_added: 8.1.0
+ deny_cmdgroup:
+ description:
+ - List of denied command groups assigned to the rule.
+ - If an empty list is passed all command groups will be removed from the rule.
+ - If option is omitted command groups will not be checked or changed.
+ type: list
+ elements: str
+ version_added: 8.1.0
description:
description:
- Description of the sudo rule.
@@ -56,14 +72,14 @@ options:
- List of hosts assigned to the rule.
- If an empty list is passed all hosts will be removed from the rule.
- If option is omitted hosts will not be checked or changed.
- - Option C(hostcategory) must be omitted to assign hosts.
+ - Option O(hostcategory) must be omitted to assign hosts.
type: list
elements: str
hostcategory:
description:
- Host category the rule applies to.
- - If 'all' is passed one must omit C(host) and C(hostgroup).
- - Option C(host) and C(hostgroup) must be omitted to assign 'all'.
+ - If V(all) is passed one must omit O(host) and O(hostgroup).
+ - Option O(host) and O(hostgroup) must be omitted to assign V(all).
choices: ['all']
type: str
hostgroup:
@@ -71,7 +87,7 @@ options:
- List of host groups assigned to the rule.
- If an empty list is passed all host groups will be removed from the rule.
- If option is omitted host groups will not be checked or changed.
- - Option C(hostcategory) must be omitted to assign host groups.
+ - Option O(hostcategory) must be omitted to assign host groups.
type: list
elements: str
runasextusers:
@@ -186,6 +202,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
class SudoRuleIPAClient(IPAClient):
@@ -246,6 +263,12 @@ class SudoRuleIPAClient(IPAClient):
def sudorule_add_allow_command_group(self, name, item):
return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item})
+ def sudorule_add_deny_command(self, name, item):
+ return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmd': item})
+
+ def sudorule_add_deny_command_group(self, name, item):
+ return self._post_json(method='sudorule_add_deny_command', name=name, item={'sudocmdgroup': item})
+
def sudorule_remove_allow_command(self, name, item):
return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
@@ -303,6 +326,8 @@ def ensure(module, client):
cmd = module.params['cmd']
cmdgroup = module.params['cmdgroup']
cmdcategory = module.params['cmdcategory']
+ deny_cmd = module.params['deny_cmd']
+ deny_cmdgroup = module.params['deny_cmdgroup']
host = module.params['host']
hostcategory = module.params['hostcategory']
hostgroup = module.params['hostgroup']
@@ -310,10 +335,17 @@ def ensure(module, client):
runasgroupcategory = module.params['runasgroupcategory']
runasextusers = module.params['runasextusers']
+ ipa_version = client.get_ipa_version()
if state in ['present', 'enabled']:
- ipaenabledflag = 'TRUE'
+ if LooseVersion(ipa_version) < LooseVersion('4.9.10'):
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = True
else:
- ipaenabledflag = 'FALSE'
+ if LooseVersion(ipa_version) < LooseVersion('4.9.10'):
+ ipaenabledflag = 'FALSE'
+ else:
+ ipaenabledflag = False
sudoopt = module.params['sudoopt']
user = module.params['user']
@@ -359,6 +391,16 @@ def ensure(module, client):
if not module.check_mode:
client.sudorule_add_allow_command_group(name=name, item=cmdgroup)
+ if deny_cmd is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_deny_command(name=name, item=deny_cmd)
+
+ if deny_cmdgroup is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_deny_command_group(name=name, item=deny_cmdgroup)
+
if runasusercategory is not None:
changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed
@@ -433,6 +475,8 @@ def main():
cmdgroup=dict(type='list', elements='str'),
cmdcategory=dict(type='str', choices=['all']),
cn=dict(type='str', required=True, aliases=['name']),
+ deny_cmd=dict(type='list', elements='str'),
+ deny_cmdgroup=dict(type='list', elements='str'),
description=dict(type='str'),
host=dict(type='list', elements='str'),
hostcategory=dict(type='str', choices=['all']),
@@ -447,7 +491,9 @@ def main():
runasextusers=dict(type='list', elements='str'))
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['cmdcategory', 'cmd'],
+ ['cmdcategory', 'deny_cmd'],
['cmdcategory', 'cmdgroup'],
+ ['cmdcategory', 'deny_cmdgroup'],
['hostcategory', 'host'],
['hostcategory', 'hostgroup'],
['usercategory', 'user'],
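As the new options and the extended mutually_exclusive list show, O(deny_cmd) and O(deny_cmdgroup) behave like O(cmd) and O(cmdgroup) but populate the rule's deny list, and neither may be combined with O(cmdcategory=all). A task sketch; rule, host, and command names are placeholders:

- name: Ensure a sudo rule that allows a command group but denies a specific command
  community.general.ipa_sudorule:
    name: sysops_storage
    cmdgroup:
      - storage-tools
    deny_cmd:
      - /usr/sbin/fdisk
    host:
      - storage01.example.com
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret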
diff --git a/ansible_collections/community/general/plugins/modules/ipa_user.py b/ansible_collections/community/general/plugins/modules/ipa_user.py
index 17b72176e..e8a1858d0 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_user.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_user.py
@@ -30,7 +30,9 @@ options:
default: 'always'
choices: [ always, on_create ]
givenname:
- description: First name.
+ description:
+ - First name.
+ - If the user does not exist and O(state=present), O(givenname) is required.
type: str
krbpasswordexpiration:
description:
@@ -51,10 +53,12 @@ options:
password:
description:
- Password for a user.
- - Will not be set for an existing user unless I(update_password=always), which is the default.
+ - Will not be set for an existing user unless O(update_password=always), which is the default.
type: str
sn:
- description: Surname.
+ description:
+ - Surname.
+ - If the user does not exist and O(state=present), O(sn) is required.
type: str
sshpubkey:
description:
@@ -99,7 +103,9 @@ options:
userauthtype:
description:
- The authentication type to use for the user.
- choices: ["password", "radius", "otp", "pkinit", "hardened"]
+ - To remove all authentication types from the user, use an empty list V([]).
+ - The choices V(idp) and V(passkey) have been added in community.general 8.1.0.
+ choices: ["password", "radius", "otp", "pkinit", "hardened", "idp", "passkey"]
type: list
elements: str
version_added: '1.2.0'
@@ -374,7 +380,7 @@ def main():
title=dict(type='str'),
homedirectory=dict(type='str'),
userauthtype=dict(type='list', elements='str',
- choices=['password', 'radius', 'otp', 'pkinit', 'hardened']))
+ choices=['password', 'radius', 'otp', 'pkinit', 'hardened', 'idp', 'passkey']))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
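Because O(userauthtype) is a list and an empty list clears all authentication types, both the new V(idp) and V(passkey) choices and a reset can be expressed directly. A sketch with placeholder account data and credentials:

- name: Allow OTP and passkey authentication for a user
  community.general.ipa_user:
    name: pinky
    state: present
    givenname: Pinky
    sn: Acme
    userauthtype:
      - otp
      - passkey
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret

- name: Remove all authentication types from the same user
  community.general.ipa_user:
    name: pinky
    state: present
    userauthtype: []
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret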
diff --git a/ansible_collections/community/general/plugins/modules/ipa_vault.py b/ansible_collections/community/general/plugins/modules/ipa_vault.py
index 84b72c1ab..88947e470 100644
--- a/ansible_collections/community/general/plugins/modules/ipa_vault.py
+++ b/ansible_collections/community/general/plugins/modules/ipa_vault.py
@@ -93,7 +93,6 @@ EXAMPLES = r'''
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
- validate_certs: false
- name: Ensure vault is present for Admin user
community.general.ipa_vault:
diff --git a/ansible_collections/community/general/plugins/modules/ipbase_info.py b/ansible_collections/community/general/plugins/modules/ipbase_info.py
new file mode 100644
index 000000000..c6a5511b7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipbase_info.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2023, Dominik Kukacka <dominik.kukacka@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: "ipbase_info"
+version_added: "7.0.0"
+short_description: "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API"
+description:
+ - "Retrieve IP geolocation and other facts of a host's IP address using the ipbase.com API"
+author: "Dominik Kukacka (@dominikkukacka)"
+extends_documentation_fragment:
+ - "community.general.attributes"
+ - "community.general.attributes.info_module"
+options:
+ ip:
+ description:
+ - "The IP you want to get the info for. If not specified the API will detect the IP automatically."
+ required: false
+ type: str
+ apikey:
+ description:
+ - "The API key for the request if you need more requests."
+ required: false
+ type: str
+ hostname:
+ description:
+ - "If the O(hostname) parameter is set to V(true), the API response will contain the hostname of the IP."
+ required: false
+ type: bool
+ default: false
+ language:
+ description:
+ - "An ISO Alpha 2 Language Code for localizing the IP data"
+ required: false
+ type: str
+ default: "en"
+notes:
+ - "Check U(https://ipbase.com/) for more information."
+'''
+
+EXAMPLES = '''
+- name: "Get IP geolocation information of the primary outgoing IP"
+ community.general.ipbase_info:
+ register: my_ip_info
+
+- name: "Get IP geolocation information of a specific IP"
+ community.general.ipbase_info:
+ ip: "8.8.8.8"
+ register: my_ip_info
+
+
+- name: "Get IP geolocation information of a specific IP with all other possible parameters"
+ community.general.ipbase_info:
+ ip: "8.8.8.8"
+ apikey: "xxxxxxxxxxxxxxxxxxxxxx"
+ hostname: true
+ language: "de"
+ register: my_ip_info
+
+'''
+
+RETURN = '''
+data:
+ description: "JSON parsed response from ipbase.com. Please refer to U(https://ipbase.com/docs/info) for the detailed structure of the response."
+ returned: success
+ type: dict
+ sample: {
+ "ip": "1.1.1.1",
+ "hostname": "one.one.one.one",
+ "type": "v4",
+ "range_type": {
+ "type": "PUBLIC",
+ "description": "Public address"
+ },
+ "connection": {
+ "asn": 13335,
+ "organization": "Cloudflare, Inc.",
+ "isp": "APNIC Research and Development",
+ "range": "1.1.1.1/32"
+ },
+ "location": {
+ "geonames_id": 5332870,
+ "latitude": 34.053611755371094,
+ "longitude": -118.24549865722656,
+ "zip": "90012",
+ "continent": {
+ "code": "NA",
+ "name": "North America",
+ "name_translated": "North America"
+ },
+ "country": {
+ "alpha2": "US",
+ "alpha3": "USA",
+ "calling_codes": [
+ "+1"
+ ],
+ "currencies": [
+ {
+ "symbol": "$",
+ "name": "US Dollar",
+ "symbol_native": "$",
+ "decimal_digits": 2,
+ "rounding": 0,
+ "code": "USD",
+ "name_plural": "US dollars"
+ }
+ ],
+ "emoji": "...",
+ "ioc": "USA",
+ "languages": [
+ {
+ "name": "English",
+ "name_native": "English"
+ }
+ ],
+ "name": "United States",
+ "name_translated": "United States",
+ "timezones": [
+ "America/New_York",
+ "America/Detroit",
+ "America/Kentucky/Louisville",
+ "America/Kentucky/Monticello",
+ "America/Indiana/Indianapolis",
+ "America/Indiana/Vincennes",
+ "America/Indiana/Winamac",
+ "America/Indiana/Marengo",
+ "America/Indiana/Petersburg",
+ "America/Indiana/Vevay",
+ "America/Chicago",
+ "America/Indiana/Tell_City",
+ "America/Indiana/Knox",
+ "America/Menominee",
+ "America/North_Dakota/Center",
+ "America/North_Dakota/New_Salem",
+ "America/North_Dakota/Beulah",
+ "America/Denver",
+ "America/Boise",
+ "America/Phoenix",
+ "America/Los_Angeles",
+ "America/Anchorage",
+ "America/Juneau",
+ "America/Sitka",
+ "America/Metlakatla",
+ "America/Yakutat",
+ "America/Nome",
+ "America/Adak",
+ "Pacific/Honolulu"
+ ],
+ "is_in_european_union": false,
+ "fips": "US",
+ "geonames_id": 6252001,
+ "hasc_id": "US",
+ "wikidata_id": "Q30"
+ },
+ "city": {
+ "fips": "644000",
+ "alpha2": null,
+ "geonames_id": 5368753,
+ "hasc_id": null,
+ "wikidata_id": "Q65",
+ "name": "Los Angeles",
+ "name_translated": "Los Angeles"
+ },
+ "region": {
+ "fips": "US06",
+ "alpha2": "US-CA",
+ "geonames_id": 5332921,
+ "hasc_id": "US.CA",
+ "wikidata_id": "Q99",
+ "name": "California",
+ "name_translated": "California"
+ }
+ },
+ "tlds": [
+ ".us"
+ ],
+ "timezone": {
+ "id": "America/Los_Angeles",
+ "current_time": "2023-05-04T04:30:28-07:00",
+ "code": "PDT",
+ "is_daylight_saving": true,
+ "gmt_offset": -25200
+ },
+ "security": {
+ "is_anonymous": false,
+ "is_datacenter": false,
+ "is_vpn": false,
+ "is_bot": false,
+ "is_abuser": true,
+ "is_known_attacker": true,
+ "is_proxy": false,
+ "is_spam": false,
+ "is_tor": false,
+ "is_icloud_relay": false,
+ "threat_score": 100
+ },
+ "domains": {
+ "count": 10943,
+ "domains": [
+ "eliwise.academy",
+ "accountingprose.academy",
+ "pistola.academy",
+ "1and1-test-ntlds-fr.accountant",
+ "omnergy.africa"
+ ]
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import fetch_url
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+USER_AGENT = 'ansible-community.general.ipbase_info/0.1.0'
+BASE_URL = 'https://api.ipbase.com/v2/info'
+
+
+class IpbaseInfo(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ def _get_url_data(self, url):
+ response, info = fetch_url(
+ self.module,
+ url,
+ force=True,
+ timeout=10,
+ headers={
+ 'Accept': 'application/json',
+ 'User-Agent': USER_AGENT,
+ })
+
+ if info['status'] != 200:
+ self.module.fail_json(msg='The API request to ipbase.com returned an error status code {0}'.format(info['status']))
+ else:
+ try:
+ content = response.read()
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(
+ msg='Failed to parse the ipbase.com response: '
+ '{0} {1}'.format(url, content))
+ else:
+ return result
+
+ def info(self):
+
+ ip = self.module.params['ip']
+ apikey = self.module.params['apikey']
+ hostname = self.module.params['hostname']
+ language = self.module.params['language']
+
+ url = BASE_URL
+
+ params = {}
+ if ip:
+ params['ip'] = ip
+
+ if apikey:
+ params['apikey'] = apikey
+
+ if hostname:
+ params['hostname'] = 1
+
+ if language:
+ params['language'] = language
+
+ if params:
+ url += '?' + urlencode(params)
+
+ return self._get_url_data(url)
+
+
+def main():
+ module_args = dict(
+ ip=dict(type='str', required=False, no_log=False),
+ apikey=dict(type='str', required=False, no_log=True),
+ hostname=dict(type='bool', required=False, no_log=False, default=False),
+ language=dict(type='str', required=False, no_log=False, default='en'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ ipbase = IpbaseInfo(module)
+ module.exit_json(**ipbase.info())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipify_facts.py b/ansible_collections/community/general/plugins/modules/ipify_facts.py
index ab96d7e94..ff17d7e54 100644
--- a/ansible_collections/community/general/plugins/modules/ipify_facts.py
+++ b/ansible_collections/community/general/plugins/modules/ipify_facts.py
@@ -35,7 +35,7 @@ options:
default: 10
validate_certs:
description:
- - When set to C(NO), SSL certificates will not be validated.
+ - When set to V(false), SSL certificates will not be validated.
type: bool
default: true
notes:
diff --git a/ansible_collections/community/general/plugins/modules/ipmi_boot.py b/ansible_collections/community/general/plugins/modules/ipmi_boot.py
index 7a4d2b6ec..9f0016560 100644
--- a/ansible_collections/community/general/plugins/modules/ipmi_boot.py
+++ b/ansible_collections/community/general/plugins/modules/ipmi_boot.py
@@ -93,7 +93,6 @@ options:
type: bool
default: false
requirements:
- - "python >= 2.6"
- pyghmi
author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
'''
diff --git a/ansible_collections/community/general/plugins/modules/ipmi_power.py b/ansible_collections/community/general/plugins/modules/ipmi_power.py
index e152f35eb..587cee06f 100644
--- a/ansible_collections/community/general/plugins/modules/ipmi_power.py
+++ b/ansible_collections/community/general/plugins/modules/ipmi_power.py
@@ -58,7 +58,7 @@ options:
- shutdown -- Have system request OS proper shutdown
- reset -- Request system reset without waiting for OS
- boot -- If system is off, then 'on', else 'reset'"
- - Either this option or I(machine) is required.
+ - Either this option or O(machine) is required.
choices: ['on', 'off', shutdown, reset, boot]
type: str
timeout:
@@ -70,7 +70,7 @@ options:
description:
- Provide a list of the remote target address for the bridge IPMI request,
and the power status.
- - Either this option or I(state) is required.
+ - Either this option or O(state) is required.
required: false
type: list
elements: dict
@@ -83,14 +83,13 @@ options:
required: true
state:
description:
- - Whether to ensure that the machine specified by I(targetAddress) in desired state.
- - If this option is not set, the power state is set by I(state).
- - If both this option and I(state) are set, this option takes precedence over I(state).
+ - Whether to ensure that the machine specified by O(machine[].targetAddress) is in the desired state.
+ - If this option is not set, the power state is set by O(state).
+ - If both this option and O(state) are set, this option takes precedence over O(state).
choices: ['on', 'off', shutdown, reset, boot]
type: str
requirements:
- - "python >= 2.6"
- pyghmi
author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
'''
@@ -98,18 +97,18 @@ author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
RETURN = '''
powerstate:
description: The current power state of the machine.
- returned: success and I(machine) is not provided
+ returned: success and O(machine) is not provided
type: str
sample: 'on'
status:
description: The current power state of the machine when the machine option is set.
- returned: success and I(machine) is provided
+ returned: success and O(machine) is provided
type: list
elements: dict
version_added: 4.3.0
contains:
powerstate:
- description: The current power state of the machine specified by I(targetAddress).
+ description: The current power state of the machine specified by RV(status[].targetAddress).
type: str
targetAddress:
description: The remote target address.
diff --git a/ansible_collections/community/general/plugins/modules/iptables_state.py b/ansible_collections/community/general/plugins/modules/iptables_state.py
index d0ea7ad79..b0cc3bd3f 100644
--- a/ansible_collections/community/general/plugins/modules/iptables_state.py
+++ b/ansible_collections/community/general/plugins/modules/iptables_state.py
@@ -34,8 +34,8 @@ description:
notes:
- The rollback feature is not a module option and depends on task's
attributes. To enable it, the module must be played asynchronously, i.e.
- by setting task attributes I(poll) to C(0), and I(async) to a value less
- or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the rollback will
+ by setting task attributes C(poll) to V(0), and C(async) to a value less
+ or equal to C(ANSIBLE_TIMEOUT). If C(async) is greater, the rollback will
still happen if it shall happen, but you will experience a connection
timeout instead of more relevant info returned by the module after its
failure.
@@ -52,7 +52,7 @@ options:
counters:
description:
- Save or restore the values of all packet and byte counters.
- - When C(true), the module is not idempotent.
+ - When V(true), the module is not idempotent.
type: bool
default: false
ip_version:
@@ -65,14 +65,14 @@ options:
description:
- Specify the path to the C(modprobe) program internally used by iptables
related commands to load kernel modules.
- - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the
+ - By default, V(/proc/sys/kernel/modprobe) is inspected to determine the
executable's path.
type: path
noflush:
description:
- - For I(state=restored), ignored otherwise.
- - If C(false), restoring iptables rules from a file flushes (deletes)
- all previous contents of the respective table(s). If C(true), the
+ - For O(state=restored), ignored otherwise.
+ - If V(false), restoring iptables rules from a file flushes (deletes)
+ all previous contents of the respective table(s). If V(true), the
previous rules are left untouched (but policies are updated anyway,
for all built-in chains).
type: bool
@@ -92,10 +92,10 @@ options:
required: true
table:
description:
- - When I(state=restored), restore only the named table even if the input
+ - When O(state=restored), restore only the named table even if the input
file contains other tables. Fail if the named table is not declared in
the file.
- - When I(state=saved), restrict output to the specified table. If not
+ - When O(state=saved), restrict output to the specified table. If not
specified, output includes all active tables.
type: str
choices: [ filter, nat, mangle, raw, security ]
@@ -207,7 +207,9 @@ saved:
"# Completed"
]
tables:
- description: The iptables we have interest for when module starts.
+ description:
+ - The iptables on the system before the module has run, separated by table.
+ - If the option O(table) is used, only this table is included.
type: dict
contains:
table:
@@ -346,20 +348,27 @@ def filter_and_format_state(string):
return lines
-def per_table_state(command, state):
+def parse_per_table_state(all_states_dump):
'''
Convert raw iptables-save output into usable datastructure, for reliable
comparisons between initial and final states.
'''
+ lines = filter_and_format_state(all_states_dump)
tables = dict()
- for t in TABLES:
- COMMAND = list(command)
- if '*%s' % t in state.splitlines():
- COMMAND.extend(['--table', t])
- dummy, out, dummy = module.run_command(COMMAND, check_rc=True)
- out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out)
- out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out)
- tables[t] = [tt for tt in out.splitlines() if tt != '']
+ current_table = ''
+ current_list = list()
+ for line in lines:
+ if re.match(r'^[*](filter|mangle|nat|raw|security)$', line):
+ current_table = line[1:]
+ continue
+ if line == 'COMMIT':
+ tables[current_table] = current_list
+ current_table = ''
+ current_list = list()
+ continue
+ if line.startswith('# '):
+ continue
+ current_list.append(line)
return tables
@@ -458,7 +467,7 @@ def main():
# The issue comes when wanting to restore state from empty iptable-save's
# output... what happens when, say:
# - no table is specified, and iptables-save's output is only nat table;
- # - we give filter's ruleset to iptables-restore, that locks ourselve out
+ # - we give filter's ruleset to iptables-restore, that locks ourselves out
# of the host;
# then trying to roll iptables state back to the previous (working) setup
# doesn't override current filter table because no filter table is stored
@@ -486,7 +495,7 @@ def main():
# Depending on the value of 'table', initref_state may differ from
# initial_state.
(rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
- tables_before = per_table_state(SAVECOMMAND, stdout)
+ tables_before = parse_per_table_state(stdout)
initref_state = filter_and_format_state(stdout)
if state == 'saved':
@@ -583,14 +592,17 @@ def main():
(rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
restored_state = filter_and_format_state(stdout)
-
+ tables_after = parse_per_table_state('\n'.join(restored_state))
if restored_state not in (initref_state, initial_state):
- if module.check_mode:
- changed = True
- else:
- tables_after = per_table_state(SAVECOMMAND, stdout)
- if tables_after != tables_before:
+ for table_name, table_content in tables_after.items():
+ if table_name not in tables_before:
+ # Would initialize a table, which doesn't exist yet
+ changed = True
+ break
+ if tables_before[table_name] != table_content:
+ # Content of some table changes
changed = True
+ break
if _back is None or module.check_mode:
module.exit_json(
@@ -633,7 +645,7 @@ def main():
os.remove(b_back)
(rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
- tables_rollback = per_table_state(SAVECOMMAND, stdout)
+ tables_rollback = parse_per_table_state(stdout)
msg = (
"Failed to confirm state restored from %s after %ss. "
diff --git a/ansible_collections/community/general/plugins/modules/ipwcli_dns.py b/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
index 7b05aefb7..3ffad79fb 100644
--- a/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
+++ b/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
@@ -54,7 +54,7 @@ options:
address:
description:
- The IP address for the A or AAAA record.
- - Required for I(type=A) or I(type=AAAA).
+ - Required for O(type=A) or O(type=AAAA).
type: str
ttl:
description:
@@ -80,38 +80,38 @@ options:
port:
description:
- Sets the port of the SRV record.
- - Required for I(type=SRV).
+ - Required for O(type=SRV).
type: int
target:
description:
- Sets the target of the SRV record.
- - Required for I(type=SRV).
+ - Required for O(type=SRV).
type: str
order:
description:
- Sets the order of the NAPTR record.
- - Required for I(type=NAPTR).
+ - Required for O(type=NAPTR).
type: int
preference:
description:
- Sets the preference of the NAPTR record.
- - Required for I(type=NAPTR).
+ - Required for O(type=NAPTR).
type: int
flags:
description:
- Sets one of the possible flags of NAPTR record.
- - Required for I(type=NAPTR).
+ - Required for O(type=NAPTR).
type: str
choices: ['S', 'A', 'U', 'P']
service:
description:
- Sets the service of the NAPTR record.
- - Required for I(type=NAPTR).
+ - Required for O(type=NAPTR).
type: str
replacement:
description:
- Sets the replacement of the NAPTR record.
- - Required for I(type=NAPTR).
+ - Required for O(type=NAPTR).
type: str
username:
description:
diff --git a/ansible_collections/community/general/plugins/modules/irc.py b/ansible_collections/community/general/plugins/modules/irc.py
index 6cd7bc120..00ff299ee 100644
--- a/ansible_collections/community/general/plugins/modules/irc.py
+++ b/ansible_collections/community/general/plugins/modules/irc.py
@@ -50,8 +50,7 @@ options:
color:
type: str
description:
- - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
- Added 11 more colors in version 2.0.
+ - Text color for the message.
default: "none"
choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
"light_blue", "pink", "gray", "light_gray"]
@@ -79,11 +78,17 @@ options:
- Timeout to use while waiting for successful registration and join
messages, this is to prevent an endless loop
default: 30
- use_ssl:
+ use_tls:
description:
- Designates whether TLS/SSL should be used when connecting to the IRC server
+ - O(use_tls) is available since community.general 8.1.0. Before that, the option
+ was exclusively called O(use_ssl). The latter is now an alias of O(use_tls).
+ - B(Note:) for security reasons, you should always set O(use_tls=true) and
+ O(validate_certs=true) whenever possible.
type: bool
default: false
+ aliases:
+ - use_ssl
part:
description:
- Designates whether user should part from channel after sending message or not.
@@ -96,6 +101,16 @@ options:
- Text style for the message. Note italic does not work on some clients
choices: [ "bold", "underline", "reverse", "italic", "none" ]
default: none
+ validate_certs:
+ description:
+ - If set to V(false), the SSL certificates will not be validated.
+ - This should always be set to V(true). Using V(false) is unsafe and should only be done
+ if the network between Ansible and the IRC server is known to be safe.
+ - B(Note:) for security reasons, you should always set O(use_tls=true) and
+ O(validate_certs=true) whenever possible.
+ default: false
+ type: bool
+ version_added: 8.1.0
# informational: requirements for nodes
requirements: [ socket ]
@@ -108,6 +123,8 @@ EXAMPLES = '''
- name: Send a message to an IRC channel from nick ansible
community.general.irc:
server: irc.example.net
+ use_tls: true
+ validate_certs: true
channel: #t1
msg: Hello world
@@ -116,6 +133,8 @@ EXAMPLES = '''
module: irc
port: 6669
server: irc.example.net
+ use_tls: true
+ validate_certs: true
channel: #t1
msg: 'All finished at {{ ansible_date_time.iso8601 }}'
color: red
@@ -126,6 +145,8 @@ EXAMPLES = '''
module: irc
port: 6669
server: irc.example.net
+ use_tls: true
+ validate_certs: true
channel: #t1
nick_to:
- nick1
@@ -150,7 +171,8 @@ from ansible.module_utils.basic import AnsibleModule
def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
- nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+ nick="ansible", color='none', passwd=False, timeout=30, use_tls=False, validate_certs=True,
+ part=True, style=None):
'''send message to IRC'''
nick_to = [] if nick_to is None else nick_to
@@ -194,8 +216,20 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
message = styletext + colortext + msg
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- if use_ssl:
- irc = ssl.wrap_socket(irc)
+ if use_tls:
+ if validate_certs:
+ try:
+ context = ssl.create_default_context()
+ except AttributeError:
+ raise Exception('Need at least Python 2.7.9 for SSL certificate validation')
+ else:
+ if getattr(ssl, 'PROTOCOL_TLS', None) is not None:
+ # Supported since Python 2.7.13
+ context = ssl.SSLContext(ssl.PROTOCOL_TLS)
+ else:
+ context = ssl.SSLContext()
+ context.verify_mode = ssl.CERT_NONE
+ irc = context.wrap_socket(irc)
irc.connect((server, int(port)))
if passwd:
@@ -275,7 +309,8 @@ def main():
passwd=dict(no_log=True),
timeout=dict(type='int', default=30),
part=dict(type='bool', default=True),
- use_ssl=dict(type='bool', default=False)
+ use_tls=dict(type='bool', default=False, aliases=['use_ssl']),
+ validate_certs=dict(type='bool', default=False),
),
supports_check_mode=True,
required_one_of=[['channel', 'nick_to']]
@@ -294,12 +329,13 @@ def main():
key = module.params["key"]
passwd = module.params["passwd"]
timeout = module.params["timeout"]
- use_ssl = module.params["use_ssl"]
+ use_tls = module.params["use_tls"]
part = module.params["part"]
style = module.params["style"]
+ validate_certs = module.params["validate_certs"]
try:
- send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_tls, validate_certs, part, style)
except Exception as e:
module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
diff --git a/ansible_collections/community/general/plugins/modules/iso_create.py b/ansible_collections/community/general/plugins/modules/iso_create.py
index 4b51be96d..c39c710d5 100644
--- a/ansible_collections/community/general/plugins/modules/iso_create.py
+++ b/ansible_collections/community/general/plugins/modules/iso_create.py
@@ -19,7 +19,6 @@ author:
- Diane Wang (@Tomorrow9) <dianew@vmware.com>
requirements:
- "pycdlib"
- - "python >= 2.7"
version_added: '0.2.0'
extends_documentation_fragment:
@@ -35,7 +34,7 @@ options:
src_files:
description:
- This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
- - Will fail if specified file or folder in C(src_files) does not exist on local machine.
+ - Will fail if specified file or folder in O(src_files) does not exist on local machine.
- 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and
underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path
names are limited to 255 characters.'
@@ -51,9 +50,9 @@ options:
interchange_level:
description:
- The ISO9660 interchange level to use, it dictates the rules on the names of files.
- - Levels and valid values C(1), C(2), C(3), C(4) are supported.
- - The default value is level C(1), which is the most conservative, level C(3) is recommended.
- - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension.
+ - Levels and valid values V(1), V(2), V(3), V(4) are supported.
+ - The default value is level V(1), which is the most conservative, level V(3) is recommended.
+ - ISO9660 file names at interchange level V(1) cannot have more than 8 characters or 3 characters in the extension.
type: int
default: 1
choices: [1, 2, 3, 4]
@@ -64,23 +63,23 @@ options:
rock_ridge:
description:
- Whether to make this ISO have the Rock Ridge extensions or not.
- - Valid values are C(1.09), C(1.10) or C(1.12), means adding the specified Rock Ridge version to the ISO.
- - If unsure, set C(1.09) to ensure maximum compatibility.
+ - Valid values are V(1.09), V(1.10) or V(1.12), which means adding the specified Rock Ridge version to the ISO.
+ - If unsure, set V(1.09) to ensure maximum compatibility.
- If not specified, then not add Rock Ridge extension to the ISO.
type: str
choices: ['1.09', '1.10', '1.12']
joliet:
description:
- - Support levels and valid values are C(1), C(2), or C(3).
- - Level C(3) is by far the most common.
+ - Support levels and valid values are V(1), V(2), or V(3).
+ - Level V(3) is by far the most common.
- If not specified, then no Joliet support is added.
type: int
choices: [1, 2, 3]
udf:
description:
- Whether to add UDF support to this ISO.
- - If set to C(True), then version 2.60 of the UDF spec is used.
- - If not specified or set to C(False), then no UDF support is added.
+ - If set to V(true), then version 2.60 of the UDF spec is used.
+ - If not specified or set to V(false), then no UDF support is added.
type: bool
default: false
'''
diff --git a/ansible_collections/community/general/plugins/modules/iso_customize.py b/ansible_collections/community/general/plugins/modules/iso_customize.py
index 9add080b1..543faaa5e 100644
--- a/ansible_collections/community/general/plugins/modules/iso_customize.py
+++ b/ansible_collections/community/general/plugins/modules/iso_customize.py
@@ -15,12 +15,11 @@ module: iso_customize
short_description: Add/remove/change files in ISO file
description:
- This module is used to add/remove/change files in ISO file.
- - The file inside ISO will be overwritten if it exists by option I(add_files).
+ - The file inside ISO will be overwritten if it exists by option O(add_files).
author:
- Yuhua Zou (@ZouYuhua) <zouy@vmware.com>
requirements:
- "pycdlib"
- - "python >= 2.7"
version_added: '5.8.0'
extends_documentation_fragment:
@@ -70,9 +69,9 @@ options:
type: str
required: true
notes:
-- The C(pycdlib) library states it supports Python 2.7 and 3.4 only.
+- The C(pycdlib) library states it supports Python 2.7 and 3.4+.
- >
- The function I(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet / UDF.
+ The function C(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet / UDF.
But it will not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10.
So we take workaround "delete the existing file and then add file for ISO with Rock Ridge".
'''
diff --git a/ansible_collections/community/general/plugins/modules/iso_extract.py b/ansible_collections/community/general/plugins/modules/iso_extract.py
index 599cbe4de..087ef2843 100644
--- a/ansible_collections/community/general/plugins/modules/iso_extract.py
+++ b/ansible_collections/community/general/plugins/modules/iso_extract.py
@@ -58,21 +58,18 @@ options:
required: true
force:
description:
- - If C(true), which will replace the remote file when contents are different than the source.
- - If C(false), the file will only be extracted and copied if the destination does not already exist.
+ - If V(true), the remote file will be replaced when its contents differ from the source.
+ - If V(false), the file will only be extracted and copied if the destination does not already exist.
type: bool
default: true
executable:
description:
- The path to the C(7z) executable to use for extracting files from the ISO.
- - If not provided, it will assume the value C(7z).
+ - If not provided, it will assume the value V(7z).
type: path
notes:
- Only the file checksum (content) is taken into account when extracting files
- from the ISO image. If I(force=false), only checks the presence of the file.
-- In Ansible 2.3 this module was using C(mount) and C(umount) commands only,
- requiring root access. This is no longer needed with the introduction of 7zip
- for extraction.
+ from the ISO image. If O(force=false), only checks the presence of the file.
'''
EXAMPLES = r'''
diff --git a/ansible_collections/community/general/plugins/modules/java_cert.py b/ansible_collections/community/general/plugins/modules/java_cert.py
index a188b16c3..72302b12c 100644
--- a/ansible_collections/community/general/plugins/modules/java_cert.py
+++ b/ansible_collections/community/general/plugins/modules/java_cert.py
@@ -18,6 +18,7 @@ description:
and optionally private keys to a given java keystore, or remove them from it.
extends_documentation_fragment:
- community.general.attributes
+ - ansible.builtin.files
attributes:
check_mode:
support: full
@@ -27,7 +28,7 @@ options:
cert_url:
description:
- Basic URL to fetch SSL certificate from.
- - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate.
+ - Exactly one of O(cert_url), O(cert_path), or O(pkcs12_path) is required to load certificate.
type: str
cert_port:
description:
@@ -38,7 +39,7 @@ options:
cert_path:
description:
- Local path to load certificate from.
- - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate.
+ - Exactly one of O(cert_url), O(cert_path), or O(pkcs12_path) is required to load certificate.
type: path
cert_alias:
description:
@@ -54,10 +55,10 @@ options:
pkcs12_path:
description:
- Local path to load PKCS12 keystore from.
- - Unlike C(cert_url) and C(cert_path), the PKCS12 keystore embeds the private key matching
+ - Unlike O(cert_url) and O(cert_path), the PKCS12 keystore embeds the private key matching
the certificate, and is used to import both the certificate and its private key into the
java keystore.
- - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate.
+ - Exactly one of O(cert_url), O(cert_path), or O(pkcs12_path) is required to load certificate.
type: path
pkcs12_password:
description:
@@ -98,6 +99,24 @@ options:
type: str
choices: [ absent, present ]
default: present
+ mode:
+ version_added: 8.5.0
+ owner:
+ version_added: 8.5.0
+ group:
+ version_added: 8.5.0
+ seuser:
+ version_added: 8.5.0
+ serole:
+ version_added: 8.5.0
+ setype:
+ version_added: 8.5.0
+ selevel:
+ version_added: 8.5.0
+ unsafe_writes:
+ version_added: 8.5.0
+ attributes:
+ version_added: 8.5.0
requirements: [openssl, keytool]
author:
- Adam Hamsik (@haad)
@@ -331,6 +350,12 @@ def build_proxy_options():
return proxy_opts
+def _update_permissions(module, keystore_path):
+ """ Updates keystore file attributes as necessary """
+ file_args = module.load_file_common_arguments(module.params, path=keystore_path)
+ return module.set_fs_attributes_if_different(file_args, False)
+
+
def _download_cert_url(module, executable, url, port):
""" Fetches the certificate from the remote URL using `keytool -printcert...`
The PEM formatted string is returned """
@@ -375,15 +400,15 @@ def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alia
# Use local certificate from local path and import it to a java keystore
(import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False)
-
diff = {'before': '\n', 'after': '%s\n' % keystore_alias}
- if import_rc == 0 and os.path.exists(keystore_path):
- module.exit_json(changed=True, msg=import_out,
- rc=import_rc, cmd=import_cmd, stdout=import_out,
- error=import_err, diff=diff)
- else:
+
+ if import_rc != 0 or not os.path.exists(keystore_path):
module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err)
+ return dict(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+
def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
''' Import certificate from path into keystore located on
@@ -408,17 +433,17 @@ def import_cert_path(module, executable, path, keystore_path, keystore_pass, ali
(import_rc, import_out, import_err) = module.run_command(import_cmd,
data="%s\n%s" % (keystore_pass, keystore_pass),
check_rc=False)
-
diff = {'before': '\n', 'after': '%s\n' % alias}
- if import_rc == 0:
- module.exit_json(changed=True, msg=import_out,
- rc=import_rc, cmd=import_cmd, stdout=import_out,
- error=import_err, diff=diff)
- else:
- module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+ if import_rc != 0:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err)
+
+ return dict(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
-def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type, exit_after=True):
+
+def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type):
''' Delete certificate identified with alias from keystore on keystore_path '''
del_cmd = [
executable,
@@ -434,13 +459,13 @@ def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystor
# Delete SSL certificate from keystore
(del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True)
+ diff = {'before': '%s\n' % alias, 'after': None}
- if exit_after:
- diff = {'before': '%s\n' % alias, 'after': None}
+ if del_rc != 0:
+ module.fail_json(msg=del_out, rc=del_rc, cmd=del_cmd, error=del_err)
- module.exit_json(changed=True, msg=del_out,
- rc=del_rc, cmd=del_cmd, stdout=del_out,
- error=del_err, diff=diff)
+ return dict(changed=True, msg=del_out, rc=del_rc, cmd=del_cmd,
+ stdout=del_out, error=del_err, diff=diff)
def test_keytool(module, executable):
@@ -485,6 +510,7 @@ def main():
['cert_url', 'cert_path', 'pkcs12_path']
],
supports_check_mode=True,
+ add_file_common_args=True,
)
url = module.params.get('cert_url')
@@ -526,12 +552,14 @@ def main():
module.add_cleanup_file(new_certificate)
module.add_cleanup_file(old_certificate)
+ result = dict()
+
if state == 'absent' and alias_exists:
if module.check_mode:
module.exit_json(changed=True)
- # delete and exit
- delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
+ # delete
+ result = delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
# dump certificate to enroll in the keystore on disk and compute digest
if state == 'present':
@@ -569,16 +597,20 @@ def main():
if alias_exists:
# The certificate in the keystore does not match with the one we want to be present
# The existing certificate must first be deleted before we insert the correct one
- delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False)
+ delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
if pkcs12_path:
- import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias,
- keystore_path, keystore_pass, cert_alias, keystore_type)
+ result = import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias,
+ keystore_path, keystore_pass, cert_alias, keystore_type)
else:
- import_cert_path(module, executable, new_certificate, keystore_path,
- keystore_pass, cert_alias, keystore_type, trust_cacert)
+ result = import_cert_path(module, executable, new_certificate, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ if os.path.exists(keystore_path):
+ changed_permissions = _update_permissions(module, keystore_path)
+ result['changed'] = result.get('changed', False) or changed_permissions
- module.exit_json(changed=False)
+ module.exit_json(**result)
if __name__ == "__main__":
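Since the module now accepts the standard file attributes (add_file_common_args=True) and applies them through _update_permissions(), ownership and mode of the keystore can be managed in the same task as the import. A sketch with placeholder paths, alias, password, and account names:

- name: Import a certificate and restrict access to the keystore
  community.general.java_cert:
    cert_path: /opt/certs/app.crt
    cert_alias: app
    keystore_path: /opt/app/keystore.jks
    keystore_pass: changeit
    state: present
    owner: appsvc
    group: appsvc
    mode: '0640'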
diff --git a/ansible_collections/community/general/plugins/modules/java_keystore.py b/ansible_collections/community/general/plugins/modules/java_keystore.py
index 7c2c4884d..2aeab75c0 100644
--- a/ansible_collections/community/general/plugins/modules/java_keystore.py
+++ b/ansible_collections/community/general/plugins/modules/java_keystore.py
@@ -36,7 +36,7 @@ options:
- If the fingerprint of the provided certificate does not match the
fingerprint of the certificate bundled in the keystore, the keystore
is regenerated with the provided certificate.
- - Exactly one of I(certificate) or I(certificate_path) is required.
+ - Exactly one of O(certificate) or O(certificate_path) is required.
type: str
certificate_path:
description:
@@ -44,18 +44,18 @@ options:
- If the fingerprint of the provided certificate does not match the
fingerprint of the certificate bundled in the keystore, the keystore
is regenerated with the provided certificate.
- - Exactly one of I(certificate) or I(certificate_path) is required.
+ - Exactly one of O(certificate) or O(certificate_path) is required.
type: path
version_added: '3.0.0'
private_key:
description:
- Content of the private key used to create the keystore.
- - Exactly one of I(private_key) or I(private_key_path) is required.
+ - Exactly one of O(private_key) or O(private_key_path) is required.
type: str
private_key_path:
description:
- Location of the private key used to create the keystore.
- - Exactly one of I(private_key) or I(private_key_path) is required.
+ - Exactly one of O(private_key) or O(private_key_path) is required.
type: path
version_added: '3.0.0'
private_key_passphrase:
@@ -108,13 +108,13 @@ options:
- Type of the Java keystore.
- When this option is omitted and the keystore doesn't already exist, the
behavior follows C(keytool)'s default store type which depends on
- Java version; C(pkcs12) since Java 9 and C(jks) prior (may also
- be C(pkcs12) if new default has been backported to this version).
+ Java version; V(pkcs12) since Java 9 and V(jks) prior (may also
+ be V(pkcs12) if new default has been backported to this version).
- When this option is omitted and the keystore already exists, the current
type is left untouched, unless another option leads to overwrite the
keystore (in that case, this option behaves like for keystore creation).
- - When I(keystore_type) is set, the keystore is created with this type if
- it doesn't already exist, or is overwritten to match the given type in
+ - When O(keystore_type) is set, the keystore is created with this type if
+ it does not already exist, or is overwritten to match the given type in
case of mismatch.
type: str
choices:
@@ -122,9 +122,9 @@ options:
- pkcs12
version_added: 3.3.0
requirements:
- - openssl in PATH (when I(ssl_backend=openssl))
+ - openssl in PATH (when O(ssl_backend=openssl))
- keytool in PATH
- - cryptography >= 3.0 (when I(ssl_backend=cryptography))
+ - cryptography >= 3.0 (when O(ssl_backend=cryptography))
author:
- Guillaume Grossetie (@Mogztter)
- quidame (@quidame)
@@ -135,13 +135,13 @@ seealso:
- module: community.crypto.openssl_pkcs12
- module: community.general.java_cert
notes:
- - I(certificate) and I(private_key) require that their contents are available
- on the controller (either inline in a playbook, or with the C(file) lookup),
- while I(certificate_path) and I(private_key_path) require that the files are
+ - O(certificate) and O(private_key) require that their contents are available
+ on the controller (either inline in a playbook, or with the P(ansible.builtin.file#lookup) lookup),
+ while O(certificate_path) and O(private_key_path) require that the files are
available on the target host.
- - By design, any change of a value of options I(keystore_type), I(name) or
- I(password), as well as changes of key or certificate materials will cause
- the existing I(dest) to be overwritten.
+ - By design, any change of a value of options O(keystore_type), O(name) or
+ O(password), as well as changes of key or certificate materials will cause
+ the existing O(dest) to be overwritten.
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/jboss.py b/ansible_collections/community/general/plugins/modules/jboss.py
index b389e7e66..3d07a38d6 100644
--- a/ansible_collections/community/general/plugins/modules/jboss.py
+++ b/ansible_collections/community/general/plugins/modules/jboss.py
@@ -30,8 +30,8 @@ options:
src:
description:
- The remote path of the application ear or war to deploy.
- - Required when I(state=present).
- - Ignored when I(state=absent).
+ - Required when O(state=present).
+ - Ignored when O(state=absent).
type: path
deploy_path:
default: /var/lib/jbossas/standalone/deployments
@@ -46,7 +46,7 @@ options:
type: str
notes:
- The JBoss standalone deployment-scanner has to be enabled in standalone.xml
- - The module can wait until I(deployment) file is deployed/undeployed by deployment-scanner.
+ - The module can wait until O(deployment) file is deployed/undeployed by deployment-scanner.
Duration of waiting time depends on scan-interval parameter from standalone.xml.
- Ensure no identically named application is deployed through the JBoss CLI
seealso:
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_build.py b/ansible_collections/community/general/plugins/modules/jenkins_build.py
index 4f9520224..6d830849e 100644
--- a/ansible_collections/community/general/plugins/modules/jenkins_build.py
+++ b/ansible_collections/community/general/plugins/modules/jenkins_build.py
@@ -20,6 +20,7 @@ requirements:
author:
- Brett Milford (@brettmilford)
- Tong He (@unnecessary-username)
+ - Juan Casanova (@juanmcasanova)
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -48,7 +49,7 @@ options:
state:
description:
- Attribute that specifies if the build is to be created, deleted or stopped.
- - The C(stopped) state has been added in community.general 3.3.0.
+ - The V(stopped) state has been added in community.general 3.3.0.
default: present
choices: ['present', 'absent', 'stopped']
type: str
@@ -65,6 +66,19 @@ options:
description:
- User to authenticate with the Jenkins server.
type: str
+ detach:
+ description:
+ - Enable detached mode to not wait for the build end.
+ default: false
+ type: bool
+ version_added: 7.4.0
+ time_between_checks:
+ description:
+ - Time in seconds to wait between requests to the Jenkins server.
+ - This time must be higher than the configured quiet time for the job.
+ default: 10
+ type: int
+ version_added: 7.4.0
'''
EXAMPLES = '''
@@ -152,6 +166,8 @@ class JenkinsBuild:
self.user = module.params.get('user')
self.jenkins_url = module.params.get('url')
self.build_number = module.params.get('build_number')
+ self.detach = module.params.get('detach')
+ self.time_between_checks = module.params.get('time_between_checks')
self.server = self.get_jenkins_connection()
self.result = {
@@ -235,7 +251,14 @@ class JenkinsBuild:
build_status = self.get_build_status()
if build_status['result'] is None:
- sleep(10)
+ # If detached mode is active, mark as success; we would not be able to get here if the build did not exist
+ if self.detach:
+ result['changed'] = True
+ result['build_info'] = build_status
+
+ return result
+
+ sleep(self.time_between_checks)
self.get_result()
else:
if self.state == "stopped" and build_status['result'] == "ABORTED":
@@ -273,6 +296,8 @@ def main():
token=dict(no_log=True),
url=dict(default="http://localhost:8080"),
user=dict(),
+ detach=dict(type='bool', default=False),
+ time_between_checks=dict(type='int', default=10),
),
mutually_exclusive=[['password', 'token']],
required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]],
@@ -288,7 +313,7 @@ def main():
else:
jenkins_build.absent_build()
- sleep(10)
+ sleep(jenkins_build.time_between_checks)
result = jenkins_build.get_result()
module.exit_json(**result)
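With O(detach=true) the module triggers the build and returns without waiting for it to end, while O(time_between_checks) controls how often the build status is polled otherwise. A sketch with placeholder job name and credentials:

- name: Trigger a long-running build without waiting for it to finish
  community.general.jenkins_build:
    name: nightly-integration
    state: present
    detach: true
    user: admin
    token: abcdefghijklmnopqrstuvwxyz123456
    url: http://localhost:8080

- name: Trigger a build and poll every 30 seconds until it completes
  community.general.jenkins_build:
    name: nightly-integration
    state: present
    time_between_checks: 30
    user: admin
    token: abcdefghijklmnopqrstuvwxyz123456
    url: http://localhost:8080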
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_build_info.py b/ansible_collections/community/general/plugins/modules/jenkins_build_info.py
new file mode 100644
index 000000000..eae6eb937
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jenkins_build_info.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_build_info
+short_description: Get information about Jenkins builds
+version_added: 7.4.0
+description:
+ - Get information about Jenkins builds with Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author:
+ - Juan Casanova (@juanmcasanova)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ name:
+ description:
+ - Name of the Jenkins job to which the build belongs.
+ required: true
+ type: str
+ build_number:
+ description:
+ - An integer which specifies a build of a job.
+ - If not specified the last build information will be returned.
+ type: int
+ password:
+ description:
+ - Password to authenticate with the Jenkins server.
+ type: str
+ token:
+ description:
+ - API token used to authenticate with the Jenkins server.
+ type: str
+ url:
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ type: str
+ user:
+ description:
+ - User to authenticate with the Jenkins server.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Get information about a jenkins build using basic authentication
+ community.general.jenkins_build_info:
+ name: "test-check"
+ build_number: 1
+ user: admin
+ password: asdfg
+ url: http://localhost:8080
+
+- name: Get information about a jenkins build anonymously
+ community.general.jenkins_build_info:
+ name: "stop-check"
+ build_number: 3
+ url: http://localhost:8080
+
+- name: Get information about a jenkins build using token authentication
+ community.general.jenkins_build_info:
+ name: "delete-experiment"
+ build_number: 30
+ user: Jenkins
+ token: abcdefghijklmnopqrstuvwxyz123456
+ url: http://localhost:8080
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the jenkins job.
+ returned: success
+ type: str
+ sample: "test-job"
+state:
+ description: State of the jenkins job.
+ returned: success
+ type: str
+ sample: present
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: admin
+url:
+ description: URL to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+build_info:
+ description: Build info of the jenkins job.
+ returned: success
+ type: dict
+'''
+
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ python_jenkins_installed = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class JenkinsBuildInfo:
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.build_number = module.params.get('build_number')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ }
+
+ def get_jenkins_connection(self):
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e))
+
+ def get_build_status(self):
+ try:
+ if self.build_number is None:
+ job_info = self.server.get_job_info(self.name)
+ self.build_number = job_info['lastBuild']['number']
+
+ return self.server.get_build_info(self.name, self.build_number)
+ except jenkins.JenkinsException as e:
+ response = {}
+ response["result"] = "ABSENT"
+ return response
+ except Exception as e:
+ self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ def get_result(self):
+ result = self.result
+ build_status = self.get_build_status()
+
+ if build_status['result'] == "ABSENT":
+ result['failed'] = True
+ result['build_info'] = build_status
+
+ return result
+
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ build_number=dict(type='int'),
+ name=dict(required=True),
+ password=dict(no_log=True),
+ token=dict(no_log=True),
+ url=dict(default="http://localhost:8080"),
+ user=dict(),
+ ),
+ mutually_exclusive=[['password', 'token']],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_build_info = JenkinsBuildInfo(module)
+
+ result = jenkins_build_info.get_result()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job.py b/ansible_collections/community/general/plugins/modules/jenkins_job.py
index 09b006448..e8301041f 100644
--- a/ansible_collections/community/general/plugins/modules/jenkins_job.py
+++ b/ansible_collections/community/general/plugins/modules/jenkins_job.py
@@ -30,14 +30,14 @@ options:
description:
- config in XML format.
- Required if job does not yet exist.
- - Mutually exclusive with I(enabled).
- - Considered if I(state=present).
+ - Mutually exclusive with O(enabled).
+ - Considered if O(state=present).
required: false
enabled:
description:
- Whether the job should be enabled or disabled.
- - Mutually exclusive with I(config).
- - Considered if I(state=present).
+ - Mutually exclusive with O(config).
+ - Considered if O(state=present).
type: bool
required: false
name:
@@ -77,10 +77,10 @@ options:
type: bool
default: true
description:
- - If set to C(false), the SSL certificates will not be validated.
- This should only set to C(false) used on personally controlled sites
+ - If set to V(false), the SSL certificates will not be validated.
+ This should only be set to V(false) when used on personally controlled sites
using self-signed certificates as it avoids verifying the source site.
- - The C(python-jenkins) library only handles this by using the environment variable C(PYTHONHTTPSVERIFY).
+ - The C(python-jenkins) library only handles this by using the environment variable E(PYTHONHTTPSVERIFY).
version_added: 2.3.0
'''
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job_info.py b/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
index ba6a53117..40e1d7aea 100644
--- a/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
+++ b/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
@@ -15,7 +15,6 @@ module: jenkins_job_info
short_description: Get information about Jenkins jobs
description:
- This module can be used to query information about which Jenkins jobs which already exists.
- - This module was called C(jenkins_job_info) before Ansible 2.9. The usage did not change.
requirements:
- "python-jenkins >= 0.4.12"
extends_documentation_fragment:
@@ -38,12 +37,12 @@ options:
type: str
description:
- Password to authenticate with the Jenkins server.
- - This is mutually exclusive with I(token).
+ - This is mutually exclusive with O(token).
token:
type: str
description:
- API token used to authenticate with the Jenkins server.
- - This is mutually exclusive with I(password).
+ - This is mutually exclusive with O(password).
url:
type: str
description:
@@ -55,8 +54,8 @@ options:
- User to authenticate with the Jenkins server.
validate_certs:
description:
- - If set to C(False), the SSL certificates will not be validated.
- - This should only set to C(False) used on personally controlled sites using self-signed certificates.
+ - If set to V(false), the SSL certificates will not be validated.
+ - This should only be set to V(false) when used on personally controlled sites using self-signed certificates.
default: true
type: bool
author:
@@ -122,7 +121,6 @@ EXAMPLES = '''
user: admin
token: 126df5c60d66c66e3b75b11104a16a8a
url: https://jenkins.example.com
- validate_certs: false
register: my_jenkins_job_info
'''
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_plugin.py b/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
index 2fbc83e03..13a804a50 100644
--- a/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
+++ b/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
@@ -27,7 +27,7 @@ options:
group:
type: str
description:
- - Name of the Jenkins group on the OS.
+ - GID or name of the Jenkins group on the OS.
default: jenkins
jenkins_home:
type: path
@@ -47,13 +47,13 @@ options:
owner:
type: str
description:
- - Name of the Jenkins user on the OS.
+ - UID or name of the Jenkins user on the OS.
default: jenkins
state:
type: str
description:
- Desired plugin state.
- - If the C(latest) is set, the check for new version will be performed
+ - If set to V(latest), the check for a new version will be performed
every time. This is suitable to keep the plugin up-to-date.
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
@@ -65,18 +65,18 @@ options:
updates_expiration:
type: int
description:
- - Number of seconds after which a new copy of the I(update-center.json)
+ - Number of seconds after which a new copy of the C(update-center.json)
file is downloaded. This is used to avoid the need to download the
- plugin to calculate its checksum when C(latest) is specified.
- - Set it to C(0) if no cache file should be used. In that case, the
+ plugin to calculate its checksum when O(state=latest) is specified.
+ - Set it to V(0) if no cache file should be used. In that case, the
plugin file will always be downloaded to calculate its checksum when
- C(latest) is specified.
+ O(state=latest) is specified.
default: 86400
updates_url:
type: list
elements: str
description:
- - A list of base URL(s) to retrieve I(update-center.json), and direct plugin files from.
+ - A list of base URL(s) to retrieve C(update-center.json), and direct plugin files from.
- This can be a list since community.general 3.3.0.
default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io']
update_json_url_segment:
@@ -90,14 +90,14 @@ options:
type: list
elements: str
description:
- - Path inside the I(updates_url) to get latest plugins from.
+ - Path inside the O(updates_url) to get latest plugins from.
default: ['latest']
version_added: 3.3.0
versioned_plugins_url_segments:
type: list
elements: str
description:
- - Path inside the I(updates_url) to get specific version of plugins from.
+ - Path inside the O(updates_url) to get specific version of plugins from.
default: ['download/plugins', 'plugins']
version_added: 3.3.0
url:
@@ -114,11 +114,11 @@ options:
- It might take longer to verify that the correct version is installed.
This is especially true if a specific version number is specified.
- Quote the version to prevent the value to be interpreted as float. For
- example if C(1.20) would be unquoted, it would become C(1.2).
+ example if V(1.20) would be unquoted, it would become V(1.2).
with_dependencies:
description:
- Defines whether to install plugin dependencies.
- - This option takes effect only if the I(version) is not defined.
+ - This option takes effect only if the O(version) is not defined.
type: bool
default: true
@@ -127,11 +127,11 @@ notes:
the plugin files on the disk. Only if the plugin is not installed yet and
no version is specified, the API installation is performed which requires
only the Web UI credentials.
- - It's necessary to notify the handler or call the I(service) module to
+ - It is necessary to notify the handler or call the M(ansible.builtin.service) module to
restart the Jenkins service after a new plugin was installed.
- Pinning works only if the plugin is installed and Jenkins service was
successfully restarted after the plugin installation.
- - It is not possible to run the module remotely by changing the I(url)
+ - It is not possible to run the module remotely by changing the O(url)
parameter to point to the Jenkins server. The module must be used on the
host where Jenkins runs as it needs direct access to the plugin files.
extends_documentation_fragment:
@@ -196,6 +196,29 @@ EXAMPLES = '''
url: http://localhost:8888
#
+# Example of how to authenticate with serverless deployment
+#
+- name: Update plugins on ECS Fargate Jenkins instance
+ community.general.jenkins_plugin:
+ # plugin name and version
+ name: ws-cleanup
+ version: '0.45'
+ # Jenkins home path mounted on ec2-helper VM (example)
+ jenkins_home: "/mnt/{{ jenkins_instance }}"
+ # matching the UID/GID to one in official Jenkins image
+ owner: 1000
+ group: 1000
+ # Jenkins instance URL and admin credentials
+ url: "https://{{ jenkins_instance }}.com/"
+ url_username: admin
+ url_password: p4ssw0rd
+ # make module work from EC2 which has local access
+ # to EFS mount as well as Jenkins URL
+ delegate_to: ec2-helper
+ vars:
+ jenkins_instance: foobar
+
+#
# Example of a Play which handles Jenkins restarts during the state changes
#
- name: Jenkins Master play
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_script.py b/ansible_collections/community/general/plugins/modules/jenkins_script.py
index 7f83ebcdb..030c8e6fa 100644
--- a/ansible_collections/community/general/plugins/modules/jenkins_script.py
+++ b/ansible_collections/community/general/plugins/modules/jenkins_script.py
@@ -42,8 +42,8 @@ options:
default: http://localhost:8080
validate_certs:
description:
- - If set to C(false), the SSL certificates will not be validated.
- This should only set to C(false) used on personally controlled sites
+ - If set to V(false), the SSL certificates will not be validated.
+ This should only be set to V(false) when used on personally controlled sites
using self-signed certificates as it avoids verifying the source site.
type: bool
default: true
@@ -99,7 +99,7 @@ EXAMPLES = '''
user: admin
password: admin
url: https://localhost
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
'''
RETURN = '''
diff --git a/ansible_collections/community/general/plugins/modules/jira.py b/ansible_collections/community/general/plugins/modules/jira.py
index 85097c4b7..c36cf9937 100644
--- a/ansible_collections/community/general/plugins/modules/jira.py
+++ b/ansible_collections/community/general/plugins/modules/jira.py
@@ -44,25 +44,25 @@ options:
choices: [ attach, comment, create, edit, fetch, link, search, transition, update, worklog ]
description:
- The operation to perform.
- - C(worklog) was added in community.genereal 6.5.0.
+ - V(worklog) was added in community.general 6.5.0.
username:
type: str
description:
- The username to log-in with.
- - Must be used with I(password). Mutually exclusive with I(token).
+ - Must be used with O(password). Mutually exclusive with O(token).
password:
type: str
description:
- The password to log-in with.
- - Must be used with I(username). Mutually exclusive with I(token).
+ - Must be used with O(username). Mutually exclusive with O(token).
token:
type: str
description:
- The personal access token to log-in with.
- - Mutually exclusive with I(username) and I(password).
+ - Mutually exclusive with O(username) and O(password).
version_added: 4.2.0
project:
@@ -128,20 +128,20 @@ options:
type: str
required: false
description:
- - Only used when I(operation) is C(transition), and a bit of a misnomer, it actually refers to the transition name.
+ - Only used when O(operation) is V(transition). The option name is a bit of a misnomer; it actually refers to the transition name.
assignee:
type: str
required: false
description:
- - Sets the the assignee when I(operation) is C(create), C(transition) or C(edit).
- - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use I(account_id) instead.
+ - Sets the assignee when O(operation) is V(create), V(transition), or V(edit).
+ - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use O(account_id) instead.
- Note that JIRA may not allow changing field values on specific transitions or states.
account_id:
type: str
description:
- - Sets the account identifier for the assignee when I(operation) is C(create), C(transition) or C(edit).
+ - Sets the account identifier for the assignee when O(operation) is V(create), V(transition), or V(edit).
- Note that JIRA may not allow changing field values on specific transitions or states.
version_added: 2.5.0
@@ -183,8 +183,8 @@ options:
maxresults:
required: false
description:
- - Limit the result of I(operation=search). If no value is specified, the default jira limit will be used.
- - Used when I(operation=search) only, ignored otherwise.
+ - Limit the result of O(operation=search). If no value is specified, the default jira limit will be used.
+ - Used when O(operation=search) only, ignored otherwise.
type: int
version_added: '0.2.0'
@@ -198,7 +198,7 @@ options:
validate_certs:
required: false
description:
- - Require valid SSL certificates (set to C(false) if you'd like to use self-signed certificates)
+ - Require valid SSL certificates (set to V(false) if you would like to use self-signed certificates)
default: true
type: bool
@@ -212,12 +212,12 @@ options:
required: true
type: path
description:
- - The path to the file to upload (from the remote node) or, if I(content) is specified,
+ - The path to the file to upload (from the remote node) or, if O(attachment.content) is specified,
the filename to use for the attachment.
content:
type: str
description:
- - The Base64 encoded contents of the file to attach. If not specified, the contents of I(filename) will be
+ - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) will be
used instead.
mimetype:
type: str
@@ -227,7 +227,7 @@ options:
notes:
- "Currently this only works with basic-auth, or tokens."
- - "To use with JIRA Cloud, pass the login e-mail as the I(username) and the API token as I(password)."
+ - "To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password)."
author:
- "Steve Smith (@tarka)"
@@ -799,7 +799,7 @@ class JIRA(StateModuleHelper):
if msg:
self.module.fail_json(msg=', '.join(msg))
self.module.fail_json(msg=to_native(error))
- # Fallback print body, if it cant be decoded
+ # Fallback print body, if it can't be decoded
self.module.fail_json(msg=to_native(info['body']))
body = response.read()
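The attachment suboptions above expect O(attachment.content) to carry the Base64-encoded file contents when the file is not read from the remote node. A small helper sketch for producing that value (hypothetical path, not part of the module) could look like:

    import base64

    def encode_attachment(path):
        # Read a local file and return the Base64 text expected by attachment.content.
        with open(path, 'rb') as handle:
            return base64.b64encode(handle.read()).decode('ascii')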
diff --git a/ansible_collections/community/general/plugins/modules/kdeconfig.py b/ansible_collections/community/general/plugins/modules/kdeconfig.py
index 42a08dd64..4e8d39521 100644
--- a/ansible_collections/community/general/plugins/modules/kdeconfig.py
+++ b/ansible_collections/community/general/plugins/modules/kdeconfig.py
@@ -35,11 +35,11 @@ options:
suboptions:
group:
description:
- - The option's group. One between this and I(groups) is required.
+ - The option's group. One between this and O(values[].groups) is required.
type: str
groups:
description:
- - List of the option's groups. One between this and I(group) is required.
+ - List of the option's groups. One between this and O(values[].group) is required.
type: list
elements: str
key:
@@ -49,12 +49,12 @@ options:
required: true
value:
description:
- - The option's value. One between this and I(bool_value) is required.
+ - The option's value. One between this and O(values[].bool_value) is required.
type: str
bool_value:
description:
- Boolean value.
- - One between this and I(value) is required.
+ - One between this and O(values[].value) is required.
type: bool
required: true
backup:
diff --git a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
index 1b40999ca..b5bd90403 100644
--- a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
+++ b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
@@ -53,7 +53,6 @@ EXAMPLES = '''
import os
import re
-import tempfile
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
@@ -106,16 +105,10 @@ class Blacklist(StateModuleHelper):
def __quit_module__(self):
if self.has_changed() and not self.module.check_mode:
- dummy, tmpfile = tempfile.mkstemp()
- try:
- os.remove(tmpfile)
- self.module.preserved_copy(self.vars.filename, tmpfile) # ensure right perms/ownership
- with open(tmpfile, 'w') as fd:
- fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
- self.module.atomic_move(tmpfile, self.vars.filename)
- finally:
- if os.path.exists(tmpfile):
- os.remove(tmpfile)
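+ # Back up the original file, rewrite it in place, and schedule the backup for removal on module exit.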
+ bkp = self.module.backup_local(self.vars.filename)
+ with open(self.vars.filename, "w") as fd:
+ fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
+ self.module.add_cleanup_file(bkp)
def main():
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authentication.py b/ansible_collections/community/general/plugins/modules/keycloak_authentication.py
index 6143d9d5c..bc2898d9b 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_authentication.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authentication.py
@@ -43,6 +43,7 @@ options:
providerId:
description:
- C(providerId) for the new flow when not copied from an existing flow.
+ choices: [ "basic-flow", "client-flow" ]
type: str
copyFrom:
description:
@@ -97,7 +98,7 @@ options:
type: bool
default: false
description:
- - If C(true), allows to remove the authentication flow and recreate it.
+ - If V(true), allows removing the authentication flow and recreating it.
extends_documentation_fragment:
- community.general.keycloak
@@ -109,77 +110,77 @@ author:
'''
EXAMPLES = '''
- - name: Create an authentication flow from first broker login and add an execution to it.
- community.general.keycloak_authentication:
- auth_keycloak_url: http://localhost:8080/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: master
- alias: "Copy of first broker login"
- copyFrom: "first broker login"
- authenticationExecutions:
- - providerId: "test-execution1"
- requirement: "REQUIRED"
- authenticationConfig:
- alias: "test.execution1.property"
- config:
- test1.property: "value"
- - providerId: "test-execution2"
- requirement: "REQUIRED"
- authenticationConfig:
- alias: "test.execution2.property"
- config:
- test2.property: "value"
- state: present
-
- - name: Re-create the authentication flow
- community.general.keycloak_authentication:
- auth_keycloak_url: http://localhost:8080/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: master
- alias: "Copy of first broker login"
- copyFrom: "first broker login"
- authenticationExecutions:
- - providerId: "test-provisioning"
- requirement: "REQUIRED"
- authenticationConfig:
- alias: "test.provisioning.property"
- config:
- test.provisioning.property: "value"
- state: present
- force: true
-
- - name: Create an authentication flow with subflow containing an execution.
- community.general.keycloak_authentication:
- auth_keycloak_url: http://localhost:8080/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: master
- alias: "Copy of first broker login"
- copyFrom: "first broker login"
- authenticationExecutions:
- - providerId: "test-execution1"
- requirement: "REQUIRED"
- - displayName: "New Subflow"
- requirement: "REQUIRED"
- - providerId: "auth-cookie"
- requirement: "REQUIRED"
- flowAlias: "New Sublow"
- state: present
-
- - name: Remove authentication.
- community.general.keycloak_authentication:
- auth_keycloak_url: http://localhost:8080/auth
- auth_realm: master
- auth_username: admin
- auth_password: password
- realm: master
- alias: "Copy of first broker login"
- state: absent
+- name: Create an authentication flow from first broker login and add an execution to it.
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ copyFrom: "first broker login"
+ authenticationExecutions:
+ - providerId: "test-execution1"
+ requirement: "REQUIRED"
+ authenticationConfig:
+ alias: "test.execution1.property"
+ config:
+ test1.property: "value"
+ - providerId: "test-execution2"
+ requirement: "REQUIRED"
+ authenticationConfig:
+ alias: "test.execution2.property"
+ config:
+ test2.property: "value"
+ state: present
+
+- name: Re-create the authentication flow
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ copyFrom: "first broker login"
+ authenticationExecutions:
+ - providerId: "test-provisioning"
+ requirement: "REQUIRED"
+ authenticationConfig:
+ alias: "test.provisioning.property"
+ config:
+ test.provisioning.property: "value"
+ state: present
+ force: true
+
+- name: Create an authentication flow with subflow containing an execution.
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ copyFrom: "first broker login"
+ authenticationExecutions:
+ - providerId: "test-execution1"
+ requirement: "REQUIRED"
+ - displayName: "New Subflow"
+ requirement: "REQUIRED"
+ - providerId: "auth-cookie"
+ requirement: "REQUIRED"
+ flowAlias: "New Sublow"
+ state: present
+
+- name: Remove authentication.
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ state: absent
'''
RETURN = '''
@@ -279,6 +280,8 @@ def create_or_update_executions(kc, config, realm='master'):
# Compare the executions to see if it need changes
if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index:
exec_found = True
+ if new_exec['index'] is None:
+ new_exec_index = exec_index
before += str(existing_executions[exec_index]) + '\n'
id_to_update = existing_executions[exec_index]["id"]
# Remove exec from list in case 2 exec with same name
@@ -331,7 +334,7 @@ def main():
meta_args = dict(
realm=dict(type='str', required=True),
alias=dict(type='str', required=True),
- providerId=dict(type='str'),
+ providerId=dict(type='str', choices=["basic-flow", "client-flow"]),
description=dict(type='str'),
copyFrom=dict(type='str'),
authenticationExecutions=dict(type='list', elements='dict',
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authentication_required_actions.py b/ansible_collections/community/general/plugins/modules/keycloak_authentication_required_actions.py
new file mode 100644
index 000000000..5ffbd2033
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authentication_required_actions.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_authentication_required_actions
+
+short_description: Allows administration of Keycloak authentication required actions
+
+description:
+ - This module can register, update and delete required actions.
+ - It also filters out any duplicate required actions by their alias. The first occurrence is preserved.
+
+version_added: 7.1.0
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ realm:
+ description:
+ - The name of the realm that contains the authentication required actions.
+ required: true
+ type: str
+ required_actions:
+ elements: dict
+ description:
+ - Authentication required action.
+ suboptions:
+ alias:
+ description:
+ - Unique name of the required action.
+ required: true
+ type: str
+ config:
+ description:
+ - Configuration for the required action.
+ type: dict
+ defaultAction:
+ description:
+ - Indicates whether any new user will have the required action assigned to them.
+ type: bool
+ enabled:
+ description:
+ - Indicates whether the required action is enabled.
+ type: bool
+ name:
+ description:
+ - Displayed name of the required action. Required for registration.
+ type: str
+ priority:
+ description:
+ - Priority of the required action.
+ type: int
+ providerId:
+ description:
+ - Provider ID of the required action. Required for registration.
+ type: str
+ type: list
+ state:
+ choices: [ "absent", "present" ]
+ description:
+ - Control if the realm authentication required actions are going to be registered/updated (V(present)) or deleted (V(absent)).
+ required: true
+ type: str
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Skrekulko (@Skrekulko)
+'''
+
+EXAMPLES = '''
+- name: Register a new required action.
+ community.general.keycloak_authentication_required_actions:
+ auth_client_id: "admin-cli"
+ auth_keycloak_url: "http://localhost:8080"
+ auth_password: "password"
+ auth_realm: "master"
+ auth_username: "admin"
+ realm: "master"
+ required_actions:
+ - alias: "TERMS_AND_CONDITIONS"
+ name: "Terms and conditions"
+ providerId: "TERMS_AND_CONDITIONS"
+ enabled: true
+ state: "present"
+
+- name: Update the newly registered required action.
+ community.general.keycloak_authentication_required_actions:
+ auth_client_id: "admin-cli"
+ auth_keycloak_url: "http://localhost:8080"
+ auth_password: "password"
+ auth_realm: "master"
+ auth_username: "admin"
+ realm: "master"
+ required_actions:
+ - alias: "TERMS_AND_CONDITIONS"
+ enabled: false
+ state: "present"
+
+- name: Delete the updated registered required action.
+ community.general.keycloak_authentication_required_actions:
+ auth_client_id: "admin-cli"
+ auth_keycloak_url: "http://localhost:8080"
+ auth_password: "password"
+ auth_realm: "master"
+ auth_username: "admin"
+ realm: "master"
+ required_actions:
+ - alias: "TERMS_AND_CONDITIONS"
+ state: "absent"
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the authentication required actions after module execution.
+ returned: on success
+ type: complex
+ contains:
+ alias:
+ description:
+ - Unique name of the required action.
+ sample: test-provider-id
+ type: str
+ config:
+ description:
+ - Configuration for the required action.
+ sample: {}
+ type: dict
+ defaultAction:
+ description:
+ - Indicates whether any new user will have the required action assigned to them.
+ sample: false
+ type: bool
+ enabled:
+ description:
+ - Indicates whether the required action is enabled.
+ sample: false
+ type: bool
+ name:
+ description:
+ - Displayed name of the required action. Required for registration.
+ sample: Test provider ID
+ type: str
+ priority:
+ description:
+ - Priority of the required action.
+ sample: 90
+ type: int
+ providerId:
+ description:
+ - Provider ID of the required action. Required for registration.
+ sample: test-provider-id
+ type: str
+
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sanitize_required_actions(objects):
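+ """Ensure every required action has a name (defaulting to its alias) and a providerId equal to its alias."""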
+ for obj in objects:
+ alias = obj['alias']
+ name = obj['name']
+ provider_id = obj['providerId']
+
+ if not name:
+ obj['name'] = alias
+
+ if provider_id != alias:
+ obj['providerId'] = alias
+
+ return objects
+
+
+def filter_duplicates(objects):
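+ """Drop required actions with a duplicate alias, keeping only the first occurrence."""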
+ filtered_objects = {}
+
+ for obj in objects:
+ alias = obj["alias"]
+
+ if alias not in filtered_objects:
+ filtered_objects[alias] = obj
+
+ return list(filtered_objects.values())
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ realm=dict(type='str', required=True),
+ required_actions=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ alias=dict(type='str', required=True),
+ config=dict(type='dict'),
+ defaultAction=dict(type='bool'),
+ enabled=dict(type='bool'),
+ name=dict(type='str'),
+ priority=dict(type='int'),
+ providerId=dict(type='str')
+ )
+ ),
+ state=dict(type='str', choices=['present', 'absent'], required=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']])
+ )
+
+ result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ # Convenience variables
+ realm = module.params.get('realm')
+ desired_required_actions = module.params.get('required_actions')
+ state = module.params.get('state')
+
+ # Sanitize required actions
+ desired_required_actions = sanitize_required_actions(desired_required_actions)
+
+ # Filter out duplicate required actions
+ desired_required_actions = filter_duplicates(desired_required_actions)
+
+ # Get required actions
+ before_required_actions = kc.get_required_actions(realm=realm)
+
+ if state == 'present':
+ # Initialize empty lists to hold the required actions that need to be
+ # registered, updated, and original ones of the updated one
+ register_required_actions = []
+ before_updated_required_actions = []
+ updated_required_actions = []
+
+ # Loop through the desired required actions and check if they exist in the before required actions
+ for desired_required_action in desired_required_actions:
+ found = False
+
+ # Loop through the before required actions and check if the aliases match
+ for before_required_action in before_required_actions:
+ if desired_required_action['alias'] == before_required_action['alias']:
+ update_required = False
+
+ # Fill in the parameters
+ for k, v in before_required_action.items():
+ if k not in desired_required_action or desired_required_action[k] is None:
+ desired_required_action[k] = v
+
+ # Loop through the keys of the desired and before required actions
+ # and check if there are any differences between them
+ for key in desired_required_action.keys():
+ if key in before_required_action and desired_required_action[key] != before_required_action[key]:
+ update_required = True
+ break
+
+ # If there are differences, add the before and desired required actions
+ # to their respective lists for updating
+ if update_required:
+ before_updated_required_actions.append(before_required_action)
+ updated_required_actions.append(desired_required_action)
+ found = True
+ break
+ # If the desired required action is not found in the before required actions,
+ # add it to the list of required actions to register
+ if not found:
+ # Check if name is provided
+ if 'name' not in desired_required_action or desired_required_action['name'] is None:
+ module.fail_json(
+ msg='Unable to register required action %s in realm %s: name not included'
+ % (desired_required_action['alias'], realm)
+ )
+
+ # Check if provider ID is provided
+ if 'providerId' not in desired_required_action or desired_required_action['providerId'] is None:
+ module.fail_json(
+ msg='Unable to register required action %s in realm %s: providerId not included'
+ % (desired_required_action['alias'], realm)
+ )
+
+ register_required_actions.append(desired_required_action)
+
+ # Handle diff
+ if module._diff:
+ diff_required_actions = updated_required_actions.copy()
+ diff_required_actions.extend(register_required_actions)
+
+ result['diff'] = dict(
+ before=before_updated_required_actions,
+ after=diff_required_actions
+ )
+
+ # Handle changed
+ if register_required_actions or updated_required_actions:
+ result['changed'] = True
+
+ # Handle check mode
+ if module.check_mode:
+ if register_required_actions or updated_required_actions:
+ result['changed'] = True
+ result['msg'] = 'Required actions would be registered/updated'
+ else:
+ result['changed'] = False
+ result['msg'] = 'Required actions would not be registered/updated'
+
+ module.exit_json(**result)
+
+ # Register required actions
+ if register_required_actions:
+ for register_required_action in register_required_actions:
+ kc.register_required_action(realm=realm, rep=register_required_action)
+ kc.update_required_action(alias=register_required_action['alias'], realm=realm, rep=register_required_action)
+
+ # Update required actions
+ if updated_required_actions:
+ for updated_required_action in updated_required_actions:
+ kc.update_required_action(alias=updated_required_action['alias'], realm=realm, rep=updated_required_action)
+
+ # Initialize the final list of required actions
+ final_required_actions = []
+
+ # Iterate over the before_required_actions
+ for before_required_action in before_required_actions:
+ # Check if there is an updated_required_action with the same alias
+ updated_required_action_found = False
+
+ for updated_required_action in updated_required_actions:
+ if updated_required_action['alias'] == before_required_action['alias']:
+ # Merge the two dictionaries, favoring the values from updated_required_action
+ merged_dict = {}
+ for key in before_required_action.keys():
+ if key in updated_required_action:
+ merged_dict[key] = updated_required_action[key]
+ else:
+ merged_dict[key] = before_required_action[key]
+
+ for key in updated_required_action.keys():
+ if key not in before_required_action:
+ merged_dict[key] = updated_required_action[key]
+
+ # Add the merged dictionary to the final list of required actions
+ final_required_actions.append(merged_dict)
+
+ # Mark the updated_required_action as found
+ updated_required_action_found = True
+
+ # Stop looking for updated_required_action
+ break
+
+ # If no matching updated_required_action was found, add the before_required_action to the final list of required actions
+ if not updated_required_action_found:
+ final_required_actions.append(before_required_action)
+
+ # Append any remaining updated_required_actions that were not merged
+ for updated_required_action in updated_required_actions:
+ if not any(updated_required_action['alias'] == action['alias'] for action in final_required_actions):
+ final_required_actions.append(updated_required_action)
+
+ # Append newly registered required actions
+ final_required_actions.extend(register_required_actions)
+
+ # Handle message and end state
+ result['msg'] = 'Required actions registered/updated'
+ result['end_state'] = final_required_actions
+ else:
+ # Filter out the deleted required actions
+ final_required_actions = []
+ delete_required_actions = []
+
+ for before_required_action in before_required_actions:
+ delete_action = False
+
+ for desired_required_action in desired_required_actions:
+ if before_required_action['alias'] == desired_required_action['alias']:
+ delete_action = True
+ break
+
+ if not delete_action:
+ final_required_actions.append(before_required_action)
+ else:
+ delete_required_actions.append(before_required_action)
+
+ # Handle diff
+ if module._diff:
+ result['diff'] = dict(
+ before=before_required_actions,
+ after=final_required_actions
+ )
+
+ # Handle changed
+ if delete_required_actions:
+ result['changed'] = True
+
+ # Handle check mode
+ if module.check_mode:
+ if final_required_actions:
+ result['changed'] = True
+ result['msg'] = 'Required actions would be deleted'
+ else:
+ result['changed'] = False
+ result['msg'] = 'Required actions would not be deleted'
+
+ module.exit_json(**result)
+
+ # Delete required actions
+ if delete_required_actions:
+ for delete_required_action in delete_required_actions:
+ kc.delete_required_action(alias=delete_required_action['alias'], realm=realm)
+
+ # Handle message and end state
+ result['msg'] = 'Required actions deleted'
+ result['end_state'] = final_required_actions
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
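As the module documentation above notes, required actions with a duplicate alias are filtered out and the first occurrence is preserved; a small illustration of filter_duplicates() with hypothetical input:

    actions = [
        {'alias': 'TERMS_AND_CONDITIONS', 'enabled': True},
        {'alias': 'TERMS_AND_CONDITIONS', 'enabled': False},  # duplicate alias, dropped
        {'alias': 'UPDATE_PASSWORD', 'enabled': True},
    ]
    # filter_duplicates(actions) keeps the first entry per alias:
    # [{'alias': 'TERMS_AND_CONDITIONS', 'enabled': True}, {'alias': 'UPDATE_PASSWORD', 'enabled': True}]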
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py b/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py
index c451d3751..5eef9ac76 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py
@@ -40,8 +40,8 @@ options:
state:
description:
- State of the authorization scope.
- - On C(present), the authorization scope will be created (or updated if it exists already).
- - On C(absent), the authorization scope will be removed if it exists.
+ - On V(present), the authorization scope will be created (or updated if it exists already).
+ - On V(absent), the authorization scope will be removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -108,22 +108,22 @@ end_state:
id:
description: ID of the authorization scope.
type: str
- returned: when I(state=present)
+ returned: when O(state=present)
sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41
name:
description: Name of the authorization scope.
type: str
- returned: when I(state=present)
+ returned: when O(state=present)
sample: file:delete
display_name:
description: Display name of the authorization scope.
type: str
- returned: when I(state=present)
+ returned: when O(state=present)
sample: File delete
icon_uri:
description: Icon URI for the authorization scope.
type: str
- returned: when I(state=present)
+ returned: when O(state=present)
sample: http://localhost/icon.png
'''
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authz_custom_policy.py b/ansible_collections/community/general/plugins/modules/keycloak_authz_custom_policy.py
new file mode 100644
index 000000000..8363c252e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authz_custom_policy.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_authz_custom_policy
+
+short_description: Allows administration of Keycloak client custom JavaScript policies via Keycloak API
+
+version_added: 7.5.0
+
+description:
+ - This module allows the administration of Keycloak client custom JavaScript policies via the Keycloak REST
+ API. Custom JavaScript policies are only available if a client has Authorization enabled and if
+ they have been deployed to the Keycloak server as JAR files.
+
+ - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
+ being used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate realm definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
+ The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - State of the custom policy.
+ - On V(present), the custom policy will be created (or updated if it exists already).
+ - On V(absent), the custom policy will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the custom policy to create.
+ type: str
+ required: true
+ policy_type:
+ description:
+ - The type of the policy. This must match the name of the custom policy deployed to the server.
+ - Multiple policies pointing to the same policy type can be created, but their names have to differ.
+ type: str
+ required: true
+ client_id:
+ description:
+ - The V(clientId) of the Keycloak client that should have the custom policy attached to it.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Samuli Seppänen (@mattock)
+'''
+
+EXAMPLES = '''
+- name: Manage Keycloak custom authorization policy
+ community.general.keycloak_authz_custom_policy:
+ name: OnlyOwner
+ state: present
+ policy_type: script-policy.js
+ client_id: myclient
+ realm: myrealm
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the custom policy after module execution.
+ returned: on success
+ type: dict
+ contains:
+ name:
+ description: Name of the custom policy.
+ type: str
+ returned: when O(state=present)
+ sample: OnlyOwner
+ policy_type:
+ description: Type of custom policy.
+ type: str
+ returned: when O(state=present)
+ sample: script-policy.js
+
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ policy_type=dict(type='str', required=True),
+ client_id=dict(type='str', required=True),
+ realm=dict(type='str', required=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=(
+ [['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ # Convenience variables
+ state = module.params.get('state')
+ name = module.params.get('name')
+ policy_type = module.params.get('policy_type')
+ client_id = module.params.get('client_id')
+ realm = module.params.get('realm')
+
+ cid = kc.get_client_id(client_id, realm=realm)
+ if not cid:
+ module.fail_json(msg='Invalid client %s for realm %s' %
+ (client_id, realm))
+
+ before_authz_custom_policy = kc.get_authz_policy_by_name(
+ name=name, client_id=cid, realm=realm)
+
+ desired_authz_custom_policy = {}
+ desired_authz_custom_policy['name'] = name
+ desired_authz_custom_policy['type'] = policy_type
+
+ # Modifying existing custom policies is not possible
+ if before_authz_custom_policy and state == 'present':
+ result['msg'] = "Custom policy %s already exists" % (name)
+ result['changed'] = False
+ result['end_state'] = desired_authz_custom_policy
+ elif not before_authz_custom_policy and state == 'present':
+ if module.check_mode:
+ result['msg'] = "Would create custom policy %s" % (name)
+ else:
+ kc.create_authz_custom_policy(
+ payload=desired_authz_custom_policy, policy_type=policy_type, client_id=cid, realm=realm)
+ result['msg'] = "Custom policy %s created" % (name)
+
+ result['changed'] = True
+ result['end_state'] = desired_authz_custom_policy
+ elif before_authz_custom_policy and state == 'absent':
+ if module.check_mode:
+ result['msg'] = "Would remove custom policy %s" % (name)
+ else:
+ kc.remove_authz_custom_policy(
+ policy_id=before_authz_custom_policy['id'], client_id=cid, realm=realm)
+ result['msg'] = "Custom policy %s removed" % (name)
+
+ result['changed'] = True
+ result['end_state'] = {}
+ elif not before_authz_custom_policy and state == 'absent':
+ result['msg'] = "Custom policy %s does not exist" % (name)
+ result['changed'] = False
+ result['end_state'] = {}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authz_permission.py b/ansible_collections/community/general/plugins/modules/keycloak_authz_permission.py
new file mode 100644
index 000000000..ef81fb8c3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authz_permission.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_authz_permission
+
+version_added: 7.2.0
+
+short_description: Allows administration of Keycloak client authorization permissions via Keycloak API
+
+description:
+ - This module allows the administration of Keycloak client authorization permissions via the Keycloak REST
+ API. Authorization permissions are only available if a client has Authorization enabled.
+
+ - There are some peculiarities in JSON paths and payloads for authorization permissions. In particular,
+ POST and PUT operations are targeted at permission endpoints, whereas GET requests go to the policies
+ endpoint. To make matters more interesting, the JSON responses from GET requests return data in a
+ different format than what is expected for POST and PUT. The end result is that it is not possible to
+ detect changes to things like policies, scopes or resources - at least not without a large number of
+ additional API calls. Therefore this module always updates authorization permissions instead of
+ attempting to determine if changes are truly needed.
+
+ - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
+ being used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate realm definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
+ The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - State of the authorization permission.
+ - On V(present), the authorization permission will be created (or updated if it exists already).
+ - On V(absent), the authorization permission will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the authorization permission to create.
+ type: str
+ required: true
+ description:
+ description:
+ - The description of the authorization permission.
+ type: str
+ required: false
+ permission_type:
+ description:
+ - The type of authorization permission.
+ - On V(scope) create a scope-based permission.
+ - On V(resource) create a resource-based permission.
+ type: str
+ required: true
+ choices:
+ - resource
+ - scope
+ decision_strategy:
+ description:
+ - The decision strategy to use with this permission.
+ type: str
+ default: UNANIMOUS
+ required: false
+ choices:
+ - UNANIMOUS
+ - AFFIRMATIVE
+ - CONSENSUS
+ resources:
+ description:
+ - Resource names to attach to this permission.
+ - Scope-based permissions can only include one resource.
+ - Resource-based permissions can include multiple resources.
+ type: list
+ elements: str
+ default: []
+ required: false
+ scopes:
+ description:
+ - Scope names to attach to this permission.
+ - Resource-based permissions cannot have scopes attached to them.
+ type: list
+ elements: str
+ default: []
+ required: false
+ policies:
+ description:
+ - Policy names to attach to this permission.
+ type: list
+ elements: str
+ default: []
+ required: false
+ client_id:
+ description:
+ - The clientId of the Keycloak client that should have the authorization permission attached to it.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Samuli Seppänen (@mattock)
+'''
+
+EXAMPLES = '''
+- name: Manage scope-based Keycloak authorization permission
+ community.general.keycloak_authz_permission:
+ name: ScopePermission
+ state: present
+ description: Scope permission
+ permission_type: scope
+ scopes:
+ - file:delete
+ policies:
+ - Default Policy
+ client_id: myclient
+ realm: myrealm
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
+
+- name: Manage resource-based Keycloak authorization permission
+ community.general.keycloak_authz_permission:
+ name: ResourcePermission
+ state: present
+ description: Resource permission
+ permission_type: resource
+ resources:
+ - Default Resource
+ policies:
+ - Default Policy
+ client_id: myclient
+ realm: myrealm
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the authorization permission after module execution.
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: ID of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: 9da05cd2-b273-4354-bbd8-0c133918a454
+ name:
+ description: Name of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: ResourcePermission
+ description:
+ description: Description of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: Resource Permission
+ type:
+ description: Type of the authorization permission.
+ type: str
+ returned: when O(state=present)
+ sample: resource
+ decisionStrategy:
+ description: The decision strategy to use.
+ type: str
+ returned: when O(state=present)
+ sample: UNANIMOUS
+ logic:
+ description: The logic used for the permission (part of the payload, but has a fixed value).
+ type: str
+ returned: when O(state=present)
+ sample: POSITIVE
+ resources:
+ description: IDs of resources attached to this permission.
+ type: list
+ returned: when O(state=present)
+ sample:
+ - 49e052ff-100d-4b79-a9dd-52669ed3c11d
+ scopes:
+ description: IDs of scopes attached to this permission.
+ type: list
+ returned: when O(state=present)
+ sample:
+ - 9da05cd2-b273-4354-bbd8-0c133918a454
+ policies:
+ description: IDs of policies attached to this permission.
+ type: list
+ returned: when O(state=present)
+ sample:
+ - 9da05cd2-b273-4354-bbd8-0c133918a454
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ permission_type=dict(type='str', choices=['scope', 'resource'], required=True),
+ decision_strategy=dict(type='str', default='UNANIMOUS',
+ choices=['UNANIMOUS', 'AFFIRMATIVE', 'CONSENSUS']),
+ resources=dict(type='list', elements='str', default=[], required=False),
+ scopes=dict(type='list', elements='str', default=[], required=False),
+ policies=dict(type='list', elements='str', default=[], required=False),
+ client_id=dict(type='str', required=True),
+ realm=dict(type='str', required=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=(
+ [['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ # Convenience variables
+ state = module.params.get('state')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ permission_type = module.params.get('permission_type')
+ decision_strategy = module.params.get('decision_strategy')
+ realm = module.params.get('realm')
+ client_id = module.params.get('client_id')
+ resources = module.params.get('resources')
+ scopes = module.params.get('scopes')
+ policies = module.params.get('policies')
+
+ if permission_type == 'scope' and state == 'present':
+ if scopes == []:
+ module.fail_json(msg='Scopes need to be defined when permission type is set to scope!')
+ if len(resources) > 1:
+ module.fail_json(msg='Only one resource can be defined for a scope permission!')
+
+ if permission_type == 'resource' and state == 'present':
+ if resources == []:
+ module.fail_json(msg='A resource needs to be defined when permission type is set to resource!')
+ if scopes != []:
+ module.fail_json(msg='Scopes cannot be defined when permission type is set to resource!')
+
+ result = dict(changed=False, msg='', end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ # Get id of the client based on client_id
+ cid = kc.get_client_id(client_id, realm=realm)
+ if not cid:
+ module.fail_json(msg='Invalid client %s for realm %s' %
+ (client_id, realm))
+
+ # Get current state of the permission using its name as the search
+ # filter. This returns False if it is not found.
+ permission = kc.get_authz_permission_by_name(
+ name=name, client_id=cid, realm=realm)
+
+ # Generate a JSON payload for Keycloak Admin API. This is needed for
+ # "create" and "update" operations.
+ payload = {}
+ payload['name'] = name
+ payload['description'] = description
+ payload['type'] = permission_type
+ payload['decisionStrategy'] = decision_strategy
+ payload['logic'] = 'POSITIVE'
+ payload['scopes'] = []
+ payload['resources'] = []
+ payload['policies'] = []
+
+ if permission_type == 'scope':
+ # Add the resource id, if any, to the payload. While the data type is a
+ # list, it is only possible to have one entry in it based on what Keycloak
+ # Admin Console does.
+ r = False
+ resource_scopes = []
+
+ if resources:
+ r = kc.get_authz_resource_by_name(resources[0], cid, realm)
+ if not r:
+ module.fail_json(msg='Unable to find authorization resource with name %s for client %s in realm %s' % (resources[0], cid, realm))
+ else:
+ payload['resources'].append(r['_id'])
+
+ for rs in r['scopes']:
+ resource_scopes.append(rs['id'])
+
+ # Generate a list of scope ids based on scope names. Fail if the
+ # defined resource does not include all those scopes.
+ for scope in scopes:
+ s = kc.get_authz_authorization_scope_by_name(scope, cid, realm)
+ if r and not s['id'] in resource_scopes:
+ module.fail_json(msg='Resource %s does not include scope %s for client %s in realm %s' % (resources[0], scope, client_id, realm))
+ else:
+ payload['scopes'].append(s['id'])
+
+ elif permission_type == 'resource':
+ if resources:
+ for resource in resources:
+ r = kc.get_authz_resource_by_name(resource, cid, realm)
+ if not r:
+ module.fail_json(msg='Unable to find authorization resource with name %s for client %s in realm %s' % (resource, cid, realm))
+ else:
+ payload['resources'].append(r['_id'])
+
+ # Add policy ids, if any, to the payload.
+ if policies:
+ for policy in policies:
+ p = kc.get_authz_policy_by_name(policy, cid, realm)
+
+ if p:
+ payload['policies'].append(p['id'])
+ else:
+ module.fail_json(msg='Unable to find authorization policy with name %s for client %s in realm %s' % (policy, client_id, realm))
+
+ # Add "id" to payload for update operations
+ if permission:
+ payload['id'] = permission['id']
+
+ # Handle the special case where the user attempts to change an already
+ # existing permission's type - something that can't be done without a
+ # full delete -> (re)create cycle.
+ if permission['type'] != payload['type']:
+ module.fail_json(msg='Modifying the type of permission (scope/resource) is not supported: \
+ permission %s of client %s in realm %s unchanged' % (permission['id'], cid, realm))
+
+ # Updating an authorization permission is tricky for several reasons.
+ # Firstly, the current permission is retrieved using a _policy_ endpoint,
+ # not from a permission endpoint. Also, the data that is returned is in a
+ # different format than what is expected by the payload. So, comparing the
+ # current state attribute by attribute to the payload is not possible. For
+ # example the data contains a JSON object "config" which may contain the
+    # authorization type, but which is not required in the payload. Moreover,
+ # information about resources, scopes and policies is _not_ present in the
+ # data. So, there is no way to determine if any of those fields have
+ # changed. Therefore the best options we have are
+ #
+ # a) Always apply the payload without checking the current state
+ # b) Refuse to make any changes to any settings (only support create and delete)
+ #
+ # The approach taken here is a).
+ #
+ if permission and state == 'present':
+ if module.check_mode:
+ result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. \
+ Would apply desired state without checking the current state.'
+ else:
+ kc.update_authz_permission(payload=payload, permission_type=permission_type, id=permission['id'], client_id=cid, realm=realm)
+ result['msg'] = 'Notice: unable to check current resources, scopes and policies for permission. \
+ Applying desired state without checking the current state.'
+
+ # Assume that something changed, although we don't know if that is the case.
+ result['changed'] = True
+ result['end_state'] = payload
+ elif not permission and state == 'present':
+ if module.check_mode:
+ result['msg'] = 'Would create permission'
+ else:
+ kc.create_authz_permission(payload=payload, permission_type=permission_type, client_id=cid, realm=realm)
+ result['msg'] = 'Permission created'
+
+ result['changed'] = True
+ result['end_state'] = payload
+ elif permission and state == 'absent':
+ if module.check_mode:
+ result['msg'] = 'Would remove permission'
+ else:
+ kc.remove_authz_permission(id=permission['id'], client_id=cid, realm=realm)
+ result['msg'] = 'Permission removed'
+
+ result['changed'] = True
+
+ elif not permission and state == 'absent':
+ result['changed'] = False
+ else:
+ module.fail_json(msg='Unable to determine what to do with permission %s of client %s in realm %s' % (
+ name, client_id, realm))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
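
A minimal playbook sketch of the flow implemented above (the client, realm, resource, scope and policy names, the URL and the credentials are placeholders): a scope-type permission ties one resource, its scopes and a set of policies together, while a resource-type permission must not list scopes at all, otherwise the module fails early as at the top of main().

    - name: Ensure a scope-type authorization permission exists
      community.general.keycloak_authz_permission:
        name: ScopePermission
        state: present
        permission_type: scope
        resources:
          - myresource
        scopes:
          - view
        policies:
          - mypolicy
        client_id: myclient
        realm: myrealm
        auth_keycloak_url: http://localhost:8080/auth
        auth_username: keycloak
        auth_password: keycloak
        auth_realm: master
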
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authz_permission_info.py b/ansible_collections/community/general/plugins/modules/keycloak_authz_permission_info.py
new file mode 100644
index 000000000..8b4e96b41
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authz_permission_info.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_authz_permission_info
+
+version_added: 7.2.0
+
+short_description: Query Keycloak client authorization permissions information
+
+description:
+ - This module allows querying information about Keycloak client authorization permissions from the
+ resources endpoint via the Keycloak REST API. Authorization permissions are only available if a
+ client has Authorization enabled.
+
+ - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
+ being used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate realm definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
+ The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
+
+options:
+ name:
+ description:
+            - Name of the authorization permission to look up.
+ type: str
+ required: true
+ client_id:
+ description:
+            - The clientId of the Keycloak client that contains the authorization permission.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+author:
+ - Samuli Seppänen (@mattock)
+'''
+
+EXAMPLES = '''
+- name: Query Keycloak authorization permission
+ community.general.keycloak_authz_permission_info:
+ name: ScopePermission
+ client_id: myclient
+ realm: myrealm
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+queried_state:
+ description: State of the resource (a policy) as seen by Keycloak.
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: ID of the authorization permission.
+ type: str
+ sample: 9da05cd2-b273-4354-bbd8-0c133918a454
+ name:
+ description: Name of the authorization permission.
+ type: str
+ sample: ResourcePermission
+ description:
+ description: Description of the authorization permission.
+ type: str
+ sample: Resource Permission
+ type:
+ description: Type of the authorization permission.
+ type: str
+ sample: resource
+ decisionStrategy:
+ description: The decision strategy.
+ type: str
+ sample: UNANIMOUS
+ logic:
+ description: The logic used for the permission (part of the payload, but has a fixed value).
+ type: str
+ sample: POSITIVE
+ config:
+ description: Configuration of the permission (empty in all observed cases).
+ type: dict
+ sample: {}
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ name=dict(type='str', required=True),
+ client_id=dict(type='str', required=True),
+ realm=dict(type='str', required=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=(
+ [['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ # Convenience variables
+ name = module.params.get('name')
+ client_id = module.params.get('client_id')
+ realm = module.params.get('realm')
+
+ result = dict(changed=False, msg='', queried_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ # Get id of the client based on client_id
+ cid = kc.get_client_id(client_id, realm=realm)
+ if not cid:
+ module.fail_json(msg='Invalid client %s for realm %s' %
+ (client_id, realm))
+
+ # Get current state of the permission using its name as the search
+ # filter. This returns False if it is not found.
+ permission = kc.get_authz_permission_by_name(
+ name=name, client_id=cid, realm=realm)
+
+ result['queried_state'] = permission
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
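
A usage sketch building on the example and return value documentation above (task and variable names are illustrative): the module returns the permission representation in queried_state, or a false value when no permission with that name exists, so a follow-up task can branch on the result.

    - name: Query Keycloak authorization permission
      community.general.keycloak_authz_permission_info:
        name: ScopePermission
        client_id: myclient
        realm: myrealm
        auth_keycloak_url: http://localhost:8080/auth
        auth_username: keycloak
        auth_password: keycloak
        auth_realm: master
      register: permission_info

    - name: Show the decision strategy of the permission, if one was found
      ansible.builtin.debug:
        msg: "{{ permission_info.queried_state.decisionStrategy }}"
      when: permission_info.queried_state
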
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client.py b/ansible_collections/community/general/plugins/modules/keycloak_client.py
index ee687fcb4..b151e4541 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_client.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_client.py
@@ -40,8 +40,8 @@ options:
state:
description:
- State of the client
- - On C(present), the client will be created (or updated if it exists already).
- - On C(absent), the client will be removed if it exists
+ - On V(present), the client will be created (or updated if it exists already).
+ - On V(absent), the client will be removed if it exists
choices: ['present', 'absent']
default: 'present'
type: str
@@ -55,7 +55,7 @@ options:
client_id:
description:
- Client id of client to be worked on. This is usually an alphanumeric name chosen by
- you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
+ you. Either this or O(id) is required. If you specify both, O(id) takes precedence.
This is 'clientId' in the Keycloak REST API.
aliases:
- clientId
@@ -63,13 +63,13 @@ options:
id:
description:
- - Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
+ - Id of client to be worked on. This is usually an UUID. Either this or O(client_id)
is required. If you specify both, this takes precedence.
type: str
name:
description:
- - Name of the client (this is not the same as I(client_id)).
+ - Name of the client (this is not the same as O(client_id)).
type: str
description:
@@ -108,12 +108,12 @@ options:
client_authenticator_type:
description:
- - How do clients authenticate with the auth server? Either C(client-secret) or
- C(client-jwt) can be chosen. When using C(client-secret), the module parameter
- I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
- C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
+ - How do clients authenticate with the auth server? Either V(client-secret) or
+ V(client-jwt) can be chosen. When using V(client-secret), the module parameter
+ O(secret) can set it, while for V(client-jwt), you can use the keys C(use.jwks.url),
+ C(jwks.url), and C(jwt.credential.certificate) in the O(attributes) module parameter
to configure its behavior.
- This is 'clientAuthenticatorType' in the Keycloak REST API.
+ - This is 'clientAuthenticatorType' in the Keycloak REST API.
choices: ['client-secret', 'client-jwt']
aliases:
- clientAuthenticatorType
@@ -121,7 +121,7 @@ options:
secret:
description:
- - When using I(client_authenticator_type) C(client-secret) (the default), you can
+ - When using O(client_authenticator_type=client-secret) (the default), you can
specify a secret here (otherwise one will be generated if it does not exit). If
changing this secret, the module will not register a change currently (but the
changed secret will be saved).
@@ -246,7 +246,8 @@ options:
protocol:
description:
- - Type of client (either C(openid-connect) or C(saml).
+ - Type of client.
+ - At creation only, default value will be V(openid-connect) if O(protocol) is omitted.
type: str
choices: ['openid-connect', 'saml']
@@ -286,7 +287,7 @@ options:
use_template_config:
description:
- - Whether or not to use configuration from the I(client_template).
+ - Whether or not to use configuration from the O(client_template).
This is 'useTemplateConfig' in the Keycloak REST API.
aliases:
- useTemplateConfig
@@ -294,7 +295,7 @@ options:
use_template_scope:
description:
- - Whether or not to use scope configuration from the I(client_template).
+ - Whether or not to use scope configuration from the O(client_template).
This is 'useTemplateScope' in the Keycloak REST API.
aliases:
- useTemplateScope
@@ -302,7 +303,7 @@ options:
use_template_mappers:
description:
- - Whether or not to use mapper configuration from the I(client_template).
+ - Whether or not to use mapper configuration from the O(client_template).
This is 'useTemplateMappers' in the Keycloak REST API.
aliases:
- useTemplateMappers
@@ -391,38 +392,37 @@ options:
protocol:
description:
- - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper.
- is active.
+ - This specifies for which protocol this protocol mapper is active.
choices: ['openid-connect', 'saml']
type: str
protocolMapper:
description:
- - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
impossible to provide since this may be extended through SPIs by the user of Keycloak,
- by default Keycloak as of 3.4 ships with at least
- - C(docker-v2-allow-all-mapper)
- - C(oidc-address-mapper)
- - C(oidc-full-name-mapper)
- - C(oidc-group-membership-mapper)
- - C(oidc-hardcoded-claim-mapper)
- - C(oidc-hardcoded-role-mapper)
- - C(oidc-role-name-mapper)
- - C(oidc-script-based-protocol-mapper)
- - C(oidc-sha256-pairwise-sub-mapper)
- - C(oidc-usermodel-attribute-mapper)
- - C(oidc-usermodel-client-role-mapper)
- - C(oidc-usermodel-property-mapper)
- - C(oidc-usermodel-realm-role-mapper)
- - C(oidc-usersessionmodel-note-mapper)
- - C(saml-group-membership-mapper)
- - C(saml-hardcode-attribute-mapper)
- - C(saml-hardcode-role-mapper)
- - C(saml-role-list-mapper)
- - C(saml-role-name-mapper)
- - C(saml-user-attribute-mapper)
- - C(saml-user-property-mapper)
- - C(saml-user-session-note-mapper)
+ by default Keycloak as of 3.4 ships with at least:"
+ - V(docker-v2-allow-all-mapper)
+ - V(oidc-address-mapper)
+ - V(oidc-full-name-mapper)
+ - V(oidc-group-membership-mapper)
+ - V(oidc-hardcoded-claim-mapper)
+ - V(oidc-hardcoded-role-mapper)
+ - V(oidc-role-name-mapper)
+ - V(oidc-script-based-protocol-mapper)
+ - V(oidc-sha256-pairwise-sub-mapper)
+ - V(oidc-usermodel-attribute-mapper)
+ - V(oidc-usermodel-client-role-mapper)
+ - V(oidc-usermodel-property-mapper)
+ - V(oidc-usermodel-realm-role-mapper)
+ - V(oidc-usersessionmodel-note-mapper)
+ - V(saml-group-membership-mapper)
+ - V(saml-hardcode-attribute-mapper)
+ - V(saml-hardcode-role-mapper)
+ - V(saml-role-list-mapper)
+ - V(saml-role-name-mapper)
+ - V(saml-user-attribute-mapper)
+ - V(saml-user-property-mapper)
+ - V(saml-user-session-note-mapper)
- An exhaustive list of available mappers on your installation can be obtained on
the admin console by going to Server Info -> Providers and looking under
'protocol-mapper'.
@@ -431,10 +431,10 @@ options:
config:
description:
- Dict specifying the configuration options for the protocol mapper; the
- contents differ depending on the value of I(protocolMapper) and are not documented
+ contents differ depending on the value of O(protocol_mappers[].protocolMapper) and are not documented
other than by the source of the mappers and its parent class(es). An example is given
below. It is easiest to obtain valid config values by dumping an already-existing
- protocol mapper configuration through check-mode in the I(existing) field.
+ protocol mapper configuration through check-mode in the RV(existing) field.
type: dict
attributes:
@@ -478,7 +478,7 @@ options:
saml.signature.algorithm:
description:
- - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
+ - Signature algorithm used to sign SAML documents. One of V(RSA_SHA256), V(RSA_SHA1), V(RSA_SHA512), or V(DSA_SHA1).
saml.signing.certificate:
description:
@@ -503,15 +503,15 @@ options:
saml_name_id_format:
description:
- - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent))
+ - For SAML clients, the NameID format to use (one of V(username), V(email), V(transient), or V(persistent))
saml_signature_canonicalization_method:
description:
- SAML signature canonicalization method. This is one of four values, namely
- C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
- C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
- C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
- C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+ V(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
+ V(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+ V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
+ V(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
saml_single_logout_service_url_post:
description:
@@ -523,12 +523,12 @@ options:
user.info.response.signature.alg:
description:
- - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
+ - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of V(RS256) or V(unsigned).
request.object.signature.alg:
description:
- For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
- OIDC request object. One of C(any), C(none), C(RS256).
+ OIDC request object. One of V(any), V(none), V(RS256).
use.jwks.url:
description:
@@ -717,11 +717,16 @@ end_state:
'''
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
- keycloak_argument_spec, get_token, KeycloakError
+ keycloak_argument_spec, get_token, KeycloakError, is_struct_included
from ansible.module_utils.basic import AnsibleModule
import copy
+PROTOCOL_OPENID_CONNECT = 'openid-connect'
+PROTOCOL_SAML = 'saml'
+CLIENT_META_DATA = ['authorizationServicesEnabled']
+
+
def normalise_cr(clientrep, remove_ids=False):
""" Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the
the change detection is more effective.
@@ -780,7 +785,7 @@ def main():
consentText=dict(type='str'),
id=dict(type='str'),
name=dict(type='str'),
- protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML]),
protocolMapper=dict(type='str'),
config=dict(type='dict'),
)
@@ -814,7 +819,7 @@ def main():
authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
public_client=dict(type='bool', aliases=['publicClient']),
frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
- protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocol=dict(type='str', choices=[PROTOCOL_OPENID_CONNECT, PROTOCOL_SAML]),
attributes=dict(type='dict'),
full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
@@ -912,6 +917,8 @@ def main():
if 'clientId' not in desired_client:
module.fail_json(msg='client_id needs to be specified when creating a new client')
+ if 'protocol' not in desired_client:
+ desired_client['protocol'] = PROTOCOL_OPENID_CONNECT
if module._diff:
result['diff'] = dict(before='', after=sanitize_cr(desired_client))
@@ -940,7 +947,7 @@ def main():
if module._diff:
result['diff'] = dict(before=sanitize_cr(before_norm),
after=sanitize_cr(desired_norm))
- result['changed'] = (before_norm != desired_norm)
+ result['changed'] = not is_struct_included(desired_norm, before_norm, CLIENT_META_DATA)
module.exit_json(**result)
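
A short sketch of what the two behaviour changes in this hunk mean for playbooks (client and realm names are placeholders): creating a client without protocol now results in an openid-connect client, and change detection now checks whether the desired representation is included in the existing one (ignoring authorizationServicesEnabled) instead of requiring strict equality, so omitting server-managed fields should no longer report a spurious change.

    - name: Create a client without specifying protocol; it defaults to openid-connect at creation
      community.general.keycloak_client:
        auth_keycloak_url: http://localhost:8080/auth
        auth_username: keycloak
        auth_password: keycloak
        auth_realm: master
        realm: myrealm
        client_id: myclient
        state: present
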
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py
index 57dcac48d..be419904a 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py
@@ -43,8 +43,8 @@ options:
state:
description:
- State of the client_rolemapping.
- - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the client_rolemapping will be removed if it exists.
+ - On V(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the client_rolemapping will be removed if it exists.
default: 'present'
type: str
choices:
@@ -63,6 +63,33 @@ options:
- Name of the group to be mapped.
- This parameter is required (can be replaced by gid for less API call).
+ parents:
+ version_added: "7.1.0"
+ type: list
+ description:
+            - List of parent groups for the group to handle, sorted top to bottom.
+ - >-
+ Set this if your group is a subgroup and you do not provide the GID in O(gid).
+ elements: dict
+ suboptions:
+ id:
+ type: str
+ description:
+ - Identify parent by ID.
+                    - Needs fewer API calls than using O(parents[].name).
+                    - A deep parent chain can be started at any point when the first given parent is given as an ID.
+                    - Note that in principle both ID and name can be specified at the same time,
+                      but the current implementation only ever uses one of them, with ID
+                      being preferred.
+ name:
+ type: str
+ description:
+ - Identify parent by name.
+                    - Needs more internal API calls than using O(parents[].id) to map names to IDs under the hood.
+                    - When giving a parent chain with only names, it must be complete up to the top.
+                    - Note that in principle both ID and name can be specified at the same time,
+                      but the current implementation only ever uses one of them, with ID
+                      being preferred.
gid:
type: str
description:
@@ -73,7 +100,7 @@ options:
client_id:
type: str
description:
- - Name of the client to be mapped (different than I(cid)).
+ - Name of the client to be mapped (different than O(cid)).
- This parameter is required (can be replaced by cid for less API call).
cid:
@@ -144,6 +171,24 @@ EXAMPLES = '''
id: role_id2
delegate_to: localhost
+- name: Map a client role to a subgroup, authentication with token
+ community.general.keycloak_client_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ state: present
+ client_id: client1
+ group_name: subgroup1
+ parents:
+ - name: parent-group
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
- name: Unmap client role from a group
community.general.keycloak_client_rolemapping:
realm: MyCustomRealm
@@ -230,6 +275,13 @@ def main():
realm=dict(default='master'),
gid=dict(type='str'),
group_name=dict(type='str'),
+ parents=dict(
+ type='list', elements='dict',
+ options=dict(
+ id=dict(type='str'),
+ name=dict(type='str')
+ ),
+ ),
cid=dict(type='str'),
client_id=dict(type='str'),
roles=dict(type='list', elements='dict', options=roles_spec),
@@ -259,6 +311,7 @@ def main():
gid = module.params.get('gid')
group_name = module.params.get('group_name')
roles = module.params.get('roles')
+ parents = module.params.get('parents')
# Check the parameters
if cid is None and client_id is None:
@@ -268,7 +321,7 @@ def main():
# Get the potential missing parameters
if gid is None:
- group_rep = kc.get_group_by_name(group_name, realm=realm)
+ group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents)
if group_rep is not None:
gid = group_rep['id']
else:
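
A sketch of the new parents option in practice (group, client and role names are placeholders, mirroring the example above): when a nested subgroup is addressed by name, the parent chain must be listed completely and sorted top to bottom, whereas a single parent given by ID can start the chain at any depth.

    - name: Map a client role to a subgroup nested two levels deep, parents addressed by name
      community.general.keycloak_client_rolemapping:
        auth_keycloak_url: https://auth.example.com/auth
        auth_client_id: admin-cli
        token: TOKEN
        realm: MyCustomRealm
        state: present
        client_id: client1
        group_name: subsubgroup1
        parents:
          - name: top-level-group
          - name: child-group
        roles:
          - name: role_name1
            id: role_id1
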
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py b/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py
index a23d92867..d37af5f0c 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py
@@ -43,8 +43,8 @@ options:
state:
description:
- State of the client_scope.
- - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the client_scope will be removed if it exists.
+ - On V(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the client_scope will be removed if it exists.
default: 'present'
type: str
choices:
@@ -103,28 +103,28 @@ options:
- "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
impossible to provide since this may be extended through SPIs by the user of Keycloak,
by default Keycloak as of 3.4 ships with at least:"
- - C(docker-v2-allow-all-mapper)
- - C(oidc-address-mapper)
- - C(oidc-full-name-mapper)
- - C(oidc-group-membership-mapper)
- - C(oidc-hardcoded-claim-mapper)
- - C(oidc-hardcoded-role-mapper)
- - C(oidc-role-name-mapper)
- - C(oidc-script-based-protocol-mapper)
- - C(oidc-sha256-pairwise-sub-mapper)
- - C(oidc-usermodel-attribute-mapper)
- - C(oidc-usermodel-client-role-mapper)
- - C(oidc-usermodel-property-mapper)
- - C(oidc-usermodel-realm-role-mapper)
- - C(oidc-usersessionmodel-note-mapper)
- - C(saml-group-membership-mapper)
- - C(saml-hardcode-attribute-mapper)
- - C(saml-hardcode-role-mapper)
- - C(saml-role-list-mapper)
- - C(saml-role-name-mapper)
- - C(saml-user-attribute-mapper)
- - C(saml-user-property-mapper)
- - C(saml-user-session-note-mapper)
+ - V(docker-v2-allow-all-mapper)
+ - V(oidc-address-mapper)
+ - V(oidc-full-name-mapper)
+ - V(oidc-group-membership-mapper)
+ - V(oidc-hardcoded-claim-mapper)
+ - V(oidc-hardcoded-role-mapper)
+ - V(oidc-role-name-mapper)
+ - V(oidc-script-based-protocol-mapper)
+ - V(oidc-sha256-pairwise-sub-mapper)
+ - V(oidc-usermodel-attribute-mapper)
+ - V(oidc-usermodel-client-role-mapper)
+ - V(oidc-usermodel-property-mapper)
+ - V(oidc-usermodel-realm-role-mapper)
+ - V(oidc-usersessionmodel-note-mapper)
+ - V(saml-group-membership-mapper)
+ - V(saml-hardcode-attribute-mapper)
+ - V(saml-hardcode-role-mapper)
+ - V(saml-role-list-mapper)
+ - V(saml-role-name-mapper)
+ - V(saml-user-attribute-mapper)
+ - V(saml-user-property-mapper)
+ - V(saml-user-session-note-mapper)
- An exhaustive list of available mappers on your installation can be obtained on
the admin console by going to Server Info -> Providers and looking under
'protocol-mapper'.
@@ -143,10 +143,10 @@ options:
config:
description:
- Dict specifying the configuration options for the protocol mapper; the
- contents differ depending on the value of I(protocolMapper) and are not documented
+ contents differ depending on the value of O(protocol_mappers[].protocolMapper) and are not documented
other than by the source of the mappers and its parent class(es). An example is given
below. It is easiest to obtain valid config values by dumping an already-existing
- protocol mapper configuration through check-mode in the C(existing) return value.
+ protocol mapper configuration through check-mode in the RV(existing) return value.
type: dict
attributes:
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py b/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py
index facf02aa4..37a5d3be9 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py
@@ -40,7 +40,7 @@ options:
client_id:
description:
- - The I(client_id) of the client. If not set the clientscop types are set as a default for the realm.
+      - The O(client_id) of the client. If not set, the client scope types are set as a default for the realm.
aliases:
- clientId
type: str
@@ -67,7 +67,7 @@ author:
EXAMPLES = '''
- name: Set default client scopes on realm level
- community.general.keycloak_clientsecret_info:
+ community.general.keycloak_clientscope_type:
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
auth_realm: master
@@ -79,7 +79,7 @@ EXAMPLES = '''
- name: Set default and optional client scopes on client level with token auth
- community.general.keycloak_clientsecret_info:
+ community.general.keycloak_clientscope_type:
auth_client_id: admin-cli
auth_keycloak_url: https://auth.example.com/auth
token: TOKEN
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py
index 98a41ad20..c77262035 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py
@@ -26,8 +26,8 @@ description:
and a user having the expected roles.
- When retrieving a new client secret, where possible provide the client's
- I(id) (not I(client_id)) to the module. This removes a lookup to the API to
- translate the I(client_id) into the client ID.
+ O(id) (not O(client_id)) to the module. This removes a lookup to the API to
+ translate the O(client_id) into the client ID.
- "Note that this module returns the client secret. To avoid this showing up in the logs,
please add C(no_log: true) to the task."
@@ -48,7 +48,7 @@ options:
client_id:
description:
- - The I(client_id) of the client. Passing this instead of I(id) results in an
+ - The O(client_id) of the client. Passing this instead of O(id) results in an
extra API call.
aliases:
- clientId
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py b/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
index d2555afc5..cd7f6c09b 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
@@ -38,8 +38,8 @@ options:
state:
description:
- State of the client template.
- - On C(present), the client template will be created (or updated if it exists already).
- - On C(absent), the client template will be removed if it exists
+ - On V(present), the client template will be created (or updated if it exists already).
+ - On V(absent), the client template will be removed if it exists
choices: ['present', 'absent']
default: 'present'
type: str
@@ -67,7 +67,7 @@ options:
protocol:
description:
- - Type of client template (either C(openid-connect) or C(saml).
+ - Type of client template.
choices: ['openid-connect', 'saml']
type: str
@@ -106,38 +106,37 @@ options:
protocol:
description:
- - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper.
- is active.
+ - This specifies for which protocol this protocol mapper is active.
choices: ['openid-connect', 'saml']
type: str
protocolMapper:
description:
- - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
impossible to provide since this may be extended through SPIs by the user of Keycloak,
- by default Keycloak as of 3.4 ships with at least
- - C(docker-v2-allow-all-mapper)
- - C(oidc-address-mapper)
- - C(oidc-full-name-mapper)
- - C(oidc-group-membership-mapper)
- - C(oidc-hardcoded-claim-mapper)
- - C(oidc-hardcoded-role-mapper)
- - C(oidc-role-name-mapper)
- - C(oidc-script-based-protocol-mapper)
- - C(oidc-sha256-pairwise-sub-mapper)
- - C(oidc-usermodel-attribute-mapper)
- - C(oidc-usermodel-client-role-mapper)
- - C(oidc-usermodel-property-mapper)
- - C(oidc-usermodel-realm-role-mapper)
- - C(oidc-usersessionmodel-note-mapper)
- - C(saml-group-membership-mapper)
- - C(saml-hardcode-attribute-mapper)
- - C(saml-hardcode-role-mapper)
- - C(saml-role-list-mapper)
- - C(saml-role-name-mapper)
- - C(saml-user-attribute-mapper)
- - C(saml-user-property-mapper)
- - C(saml-user-session-note-mapper)
+ by default Keycloak as of 3.4 ships with at least:"
+ - V(docker-v2-allow-all-mapper)
+ - V(oidc-address-mapper)
+ - V(oidc-full-name-mapper)
+ - V(oidc-group-membership-mapper)
+ - V(oidc-hardcoded-claim-mapper)
+ - V(oidc-hardcoded-role-mapper)
+ - V(oidc-role-name-mapper)
+ - V(oidc-script-based-protocol-mapper)
+ - V(oidc-sha256-pairwise-sub-mapper)
+ - V(oidc-usermodel-attribute-mapper)
+ - V(oidc-usermodel-client-role-mapper)
+ - V(oidc-usermodel-property-mapper)
+ - V(oidc-usermodel-realm-role-mapper)
+ - V(oidc-usersessionmodel-note-mapper)
+ - V(saml-group-membership-mapper)
+ - V(saml-hardcode-attribute-mapper)
+ - V(saml-hardcode-role-mapper)
+ - V(saml-role-list-mapper)
+ - V(saml-role-name-mapper)
+ - V(saml-user-attribute-mapper)
+ - V(saml-user-property-mapper)
+ - V(saml-user-session-note-mapper)
- An exhaustive list of available mappers on your installation can be obtained on
the admin console by going to Server Info -> Providers and looking under
'protocol-mapper'.
@@ -146,10 +145,10 @@ options:
config:
description:
- Dict specifying the configuration options for the protocol mapper; the
- contents differ depending on the value of I(protocolMapper) and are not documented
+ contents differ depending on the value of O(protocol_mappers[].protocolMapper) and are not documented
other than by the source of the mappers and its parent class(es). An example is given
below. It is easiest to obtain valid config values by dumping an already-existing
- protocol mapper configuration through check-mode in the I(existing) field.
+ protocol mapper configuration through check-mode in the RV(existing) field.
type: dict
attributes:
@@ -160,9 +159,9 @@ options:
type: dict
notes:
- - The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled),
- I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and
- I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
+ - The Keycloak REST API defines further fields (namely C(bearerOnly), C(consentRequired), C(standardFlowEnabled),
+ C(implicitFlowEnabled), C(directAccessGrantsEnabled), C(serviceAccountsEnabled), C(publicClient), and
+ C(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
they are not available through this module.
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_component_info.py b/ansible_collections/community/general/plugins/modules/keycloak_component_info.py
new file mode 100644
index 000000000..a788735d9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_component_info.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_component_info
+
+short_description: Retrieve component info in Keycloak
+
+version_added: 8.2.0
+
+description:
+    - This module retrieves information about components from Keycloak.
+options:
+ realm:
+ description:
+ - The name of the realm.
+ required: true
+ type: str
+ name:
+ description:
+ - Name of the Component.
+ type: str
+ provider_type:
+ description:
+ - Provider type of components.
+ - "Example:
+ V(org.keycloak.storage.UserStorageProvider),
+ V(org.keycloak.services.clientregistration.policy.ClientRegistrationPolicy),
+ V(org.keycloak.keys.KeyProvider),
+ V(org.keycloak.userprofile.UserProfileProvider),
+ V(org.keycloak.storage.ldap.mappers.LDAPStorageMapper)."
+ type: str
+ parent_id:
+ description:
+ - Container ID of the components.
+ type: str
+
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+author:
+ - Andre Desrosiers (@desand01)
+'''
+
+EXAMPLES = '''
+    - name: Retrieve info of a UserStorageProvider named myldap
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+        auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
+ name: myldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+
+    - name: Retrieve info of a key provider component
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+        auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
+ name: rsa-enc-generated
+ provider_type: org.keycloak.keys.KeyProvider
+
+    - name: Retrieve all components from realm myrealm
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+        auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
+
+    - name: Retrieve all subcomponents of a parent component, filtered by type
+ community.general.keycloak_component_info:
+ auth_keycloak_url: http://localhost:8080/auth
+        auth_username: admin
+ auth_password: password
+ auth_realm: master
+ realm: myrealm
+ parent_id: "075ef2fa-19fc-4a6d-bf4c-249f57365fd2"
+ provider_type: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+
+
+'''
+
+RETURN = '''
+components:
+ description: JSON representation of components.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ name=dict(type='str'),
+ realm=dict(type='str', required=True),
+ parent_id=dict(type='str'),
+ provider_type=dict(type='str'),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ result = dict(changed=False, components=[])
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ parentId = module.params.get('parent_id')
+ name = module.params.get('name')
+ providerType = module.params.get('provider_type')
+
+ objRealm = kc.get_realm_by_id(realm)
+ if not objRealm:
+        module.fail_json(msg="Failed to retrieve realm '{realm}'".format(realm=realm))
+
+ filters = []
+
+ if parentId:
+ filters.append("parent=%s" % (quote(parentId, safe='')))
+ else:
+ filters.append("parent=%s" % (quote(objRealm['id'], safe='')))
+
+ if name:
+ filters.append("name=%s" % (quote(name, safe='')))
+ if providerType:
+ filters.append("type=%s" % (quote(providerType, safe='')))
+
+ result['components'] = kc.get_components(filter="&".join(filters), realm=realm)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
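
A usage sketch of the filtering logic above (the parent ID is the one from the example, other names are placeholders): the module always filters on a parent (the given parent_id, or the realm itself when parent_id is omitted) and optionally on name and provider type, returning the matching component representations as a list.

    - name: List LDAP mappers defined under a user federation component
      community.general.keycloak_component_info:
        auth_keycloak_url: http://localhost:8080/auth
        auth_username: admin
        auth_password: password
        auth_realm: master
        realm: myrealm
        parent_id: "075ef2fa-19fc-4a6d-bf4c-249f57365fd2"
        provider_type: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
      register: ldap_mappers

    - name: Show the names of the components that were found
      ansible.builtin.debug:
        msg: "{{ ldap_mappers.components | map(attribute='name') | list }}"
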
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_group.py b/ansible_collections/community/general/plugins/modules/keycloak_group.py
index 399bc5b4f..5398a4b5d 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_group.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_group.py
@@ -41,9 +41,9 @@ options:
state:
description:
- State of the group.
- - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+ - On V(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
- >-
- On C(absent), the group will be removed if it exists. Be aware that absenting
+ On V(absent), the group will be removed if it exists. Be aware that absenting
a group with subgroups will automatically delete all its subgroups too.
default: 'present'
type: str
@@ -93,7 +93,7 @@ options:
type: str
description:
- Identify parent by ID.
- - Needs less API calls than using I(name).
+ - Needs less API calls than using O(parents[].name).
- A deep parent chain can be started at any point when first given parent is given as ID.
- Note that in principle both ID and name can be specified at the same time
but current implementation only always use just one of them, with ID
@@ -102,14 +102,14 @@ options:
type: str
description:
- Identify parent by name.
- - Needs more internal API calls than using I(id) to map names to ID's under the hood.
+ - Needs more internal API calls than using O(parents[].id) to map names to ID's under the hood.
- When giving a parent chain with only names it must be complete up to the top.
- Note that in principle both ID and name can be specified at the same time
but current implementation only always use just one of them, with ID
being preferred.
notes:
- - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+ - Presently, the RV(end_state.realmRoles), RV(end_state.clientRoles), and RV(end_state.access) attributes returned by the Keycloak API
are read-only for groups. This limitation will be removed in a later version of this module.
extends_documentation_fragment:
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
index 0d12ae03a..588f553e8 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
@@ -36,8 +36,8 @@ options:
state:
description:
- State of the identity provider.
- - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the identity provider will be removed if it exists.
+ - On V(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the identity provider will be removed if it exists.
default: 'present'
type: str
choices:
@@ -120,16 +120,16 @@ options:
provider_id:
description:
- - Protocol used by this provider (supported values are C(oidc) or C(saml)).
+ - Protocol used by this provider (supported values are V(oidc) or V(saml)).
aliases:
- providerId
type: str
config:
description:
- - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId).
- Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing
- identity provider configuration through check-mode in the I(existing) field.
+ - Dict specifying the configuration options for the provider; the contents differ depending on the value of O(provider_id).
+ Examples are given below for V(oidc) and V(saml). It is easiest to obtain valid config values by dumping an already-existing
+ identity provider configuration through check-mode in the RV(existing) field.
type: dict
suboptions:
hide_on_login_page:
@@ -271,7 +271,8 @@ options:
config:
description:
- - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper).
+ - Dict specifying the configuration options for the mapper; the contents differ depending on the value of
+ O(mappers[].identityProviderMapper).
type: dict
extends_documentation_fragment:
@@ -541,10 +542,14 @@ def main():
old_mapper = dict()
new_mapper = old_mapper.copy()
new_mapper.update(change)
- if new_mapper != old_mapper:
- if changeset.get('mappers') is None:
- changeset['mappers'] = list()
- changeset['mappers'].append(new_mapper)
+
+ if changeset.get('mappers') is None:
+ changeset['mappers'] = list()
+ # eventually this holds all desired mappers, unchanged, modified and newly added
+ changeset['mappers'].append(new_mapper)
+
+ # ensure idempotency in case module.params.mappers is not sorted by name
+ changeset['mappers'] = sorted(changeset['mappers'], key=lambda x: x.get('id') if x.get('name') is None else x['name'])
# Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis)
desired_idp = before_idp.copy()
@@ -611,10 +616,17 @@ def main():
# do the update
desired_idp = desired_idp.copy()
updated_mappers = desired_idp.pop('mappers', [])
+ original_mappers = list(before_idp.get('mappers', []))
+
kc.update_identity_provider(desired_idp, realm)
for mapper in updated_mappers:
if mapper.get('id') is not None:
- kc.update_identity_provider_mapper(mapper, alias, realm)
+ # only update existing if there is a change
+ for i, orig in enumerate(original_mappers):
+ if mapper['id'] == orig['id']:
+ del original_mappers[i]
+ if mapper != orig:
+ kc.update_identity_provider_mapper(mapper, alias, realm)
else:
if mapper.get('identityProviderAlias') is None:
mapper['identityProviderAlias'] = alias
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm.py b/ansible_collections/community/general/plugins/modules/keycloak_realm.py
index 53f81be48..9f2e72b52 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_realm.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_realm.py
@@ -42,8 +42,8 @@ options:
state:
description:
- State of the realm.
- - On C(present), the realm will be created (or updated if it exists already).
- - On C(absent), the realm will be removed if it exists.
+ - On V(present), the realm will be created (or updated if it exists already).
+ - On V(absent), the realm will be removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm_key.py b/ansible_collections/community/general/plugins/modules/keycloak_realm_key.py
new file mode 100644
index 000000000..6e762fba9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_realm_key.py
@@ -0,0 +1,475 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_realm_key
+
+short_description: Allows administration of Keycloak realm keys via Keycloak API
+
+version_added: 7.5.0
+
+description:
+ - This module allows the administration of Keycloak realm keys via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the realm being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate realm definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ Aliases are provided so camelCased versions can be used as well.
+
+ - This module is unable to detect changes to the actual cryptographic key after importing it.
+ However, if some other property is changed alongside the cryptographic key, then the key
+ will also get changed as a side-effect, as the JSON payload needs to include the private key.
+ This can be considered either a bug or a feature, as the alternative would be to always
+ update the realm key whether it has changed or not.
+
+ - If certificate is not explicitly provided it will be dynamically created by Keycloak.
+ Therefore comparing the current state of the certificate to the desired state (which may be
+ empty) is not possible.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+
+options:
+ state:
+ description:
+ - State of the keycloak realm key.
+ - On V(present), the realm key will be created (or updated if it exists already).
+ - On V(absent), the realm key will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the realm key to create.
+ type: str
+ required: true
+ force:
+ description:
+ - Enforce the state of the private key and certificate. This is not automatically the
+ case as this module is unable to determine the current state of the private key and
+ thus cannot trigger an update based on an actual divergence. That said, a private key
+ update may happen even if force is false as a side-effect of other changes.
+ default: false
+ type: bool
+ parent_id:
+ description:
+ - The parent_id of the realm key. In practice the ID (name) of the realm.
+ type: str
+ required: true
+ provider_id:
+ description:
+ - The name of the "provider ID" for the key.
+ - The value V(rsa-enc) has been added in community.general 8.2.0.
+ choices: ['rsa', 'rsa-enc']
+ default: 'rsa'
+ type: str
+ config:
+ description:
+ - Dict specifying the key and its properties.
+ type: dict
+ suboptions:
+ active:
+ description:
+                    - Whether the key is active or inactive. Not to be confused with the state
+ of the Ansible resource managed by the O(state) parameter.
+ default: true
+ type: bool
+ enabled:
+ description:
+ - Whether the key is enabled or disabled. Not to be confused with the state
+ of the Ansible resource managed by the O(state) parameter.
+ default: true
+ type: bool
+ priority:
+ description:
+ - The priority of the key.
+ type: int
+ required: true
+ algorithm:
+ description:
+ - Key algorithm.
+ - The values V(RS384), V(RS512), V(PS256), V(PS384), V(PS512), V(RSA1_5),
+ V(RSA-OAEP), V(RSA-OAEP-256) have been added in community.general 8.2.0.
+ default: RS256
+ choices: ['RS256', 'RS384', 'RS512', 'PS256', 'PS384', 'PS512', 'RSA1_5', 'RSA-OAEP', 'RSA-OAEP-256']
+ type: str
+ private_key:
+ description:
+ - The private key as an ASCII string. Contents of the key must match O(config.algorithm)
+ and O(provider_id).
+ - Please note that the module cannot detect whether the private key specified differs from the
+ current state's private key. Use O(force=true) to force the module to update the private key
+ if you expect it to be updated.
+ required: true
+ type: str
+ certificate:
+ description:
+ - A certificate signed with the private key as an ASCII string. Contents of the
+ key must match O(config.algorithm) and O(provider_id).
+ - If you want Keycloak to automatically generate a certificate using your private key
+ then set this to an empty string.
+ required: true
+ type: str
+notes:
+ - Current value of the private key cannot be fetched from Keycloak.
+ Therefore comparing its desired state to the current state is not
+ possible.
+ - If certificate is not explicitly provided it will be dynamically created
+ by Keycloak. Therefore comparing the current state of the certificate to
+ the desired state (which may be empty) is not possible.
+ - Due to the private key and certificate options the module is
+ B(not fully idempotent). You can use O(force=true) to force the module
+ to always update if you know that the private key might have changed.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Samuli Seppänen (@mattock)
+'''
+
+EXAMPLES = '''
+- name: Manage Keycloak realm key (certificate autogenerated by Keycloak)
+ community.general.keycloak_realm_key:
+ name: custom
+ state: present
+ parent_id: master
+ provider_id: rsa
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
+ config:
+ private_key: "{{ private_key }}"
+ certificate: ""
+ enabled: true
+ active: true
+ priority: 120
+ algorithm: RS256
+- name: Manage Keycloak realm key and certificate
+ community.general.keycloak_realm_key:
+ name: custom
+ state: present
+ parent_id: master
+ provider_id: rsa
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
+ config:
+ private_key: "{{ private_key }}"
+ certificate: "{{ certificate }}"
+ enabled: true
+ active: true
+ priority: 120
+ algorithm: RS256
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the keycloak_realm_key after module execution.
+ returned: on success
+ type: dict
+ contains:
+ id:
+ description: ID of the realm key.
+ type: str
+ returned: when O(state=present)
+ sample: 5b7ec13f-99da-46ad-8326-ab4c73cf4ce4
+ name:
+ description: Name of the realm key.
+ type: str
+ returned: when O(state=present)
+ sample: mykey
+ parentId:
+ description: ID of the realm this key belongs to.
+ type: str
+ returned: when O(state=present)
+ sample: myrealm
+ providerId:
+ description: The ID of the key provider.
+ type: str
+ returned: when O(state=present)
+ sample: rsa
+ providerType:
+ description: The type of provider.
+ type: str
+ returned: when O(state=present)
+ config:
+ description: Realm key configuration.
+ type: dict
+ returned: when O(state=present)
+ sample: {
+ "active": ["true"],
+ "algorithm": ["RS256"],
+ "enabled": ["true"],
+ "priority": ["140"]
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from copy import deepcopy
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ force=dict(type='bool', default=False),
+ parent_id=dict(type='str', required=True),
+ provider_id=dict(type='str', default='rsa', choices=['rsa', 'rsa-enc']),
+ config=dict(
+ type='dict',
+ options=dict(
+ active=dict(type='bool', default=True),
+ enabled=dict(type='bool', default=True),
+ priority=dict(type='int', required=True),
+ algorithm=dict(
+ type="str",
+ default="RS256",
+ choices=[
+ "RS256",
+ "RS384",
+ "RS512",
+ "PS256",
+ "PS384",
+ "PS512",
+ "RSA1_5",
+ "RSA-OAEP",
+ "RSA-OAEP-256",
+ ],
+ ),
+ private_key=dict(type='str', required=True, no_log=True),
+ certificate=dict(type='str', required=True)
+ )
+ )
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ # Initialize the result object. Only "changed" seems to have special
+ # meaning for Ansible.
+ result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
+
+ # This will include the current state of the realm key if it is already
+ # present. This is only used for diff-mode.
+ before_realm_key = {}
+ before_realm_key['config'] = {}
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ params_to_ignore = list(keycloak_argument_spec().keys()) + ["state", "force"]
+
+    # Filter and map the parameter names that apply to the realm key
+ component_params = [x for x in module.params
+ if x not in params_to_ignore and
+ module.params.get(x) is not None]
+
+ # We only support one component provider type in this module
+ provider_type = 'org.keycloak.keys.KeyProvider'
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+ changeset['config'] = {}
+
+ # Generate a JSON payload for Keycloak Admin API from the module
+ # parameters. Parameters that do not belong to the JSON payload (e.g.
+    # "state" or "auth_keycloak_url") have been filtered away earlier (see
+ # above).
+ #
+ # This loop converts Ansible module parameters (snake-case) into
+ # Keycloak-compatible format (camel-case). For example private_key
+ # becomes privateKey.
+ #
+ # It also converts bool, str and int parameters into lists with a single
+ # entry of 'str' type. Bool values are also lowercased. This is required
+ # by Keycloak.
+ #
+ for component_param in component_params:
+ if component_param == 'config':
+ for config_param in module.params.get('config'):
+ changeset['config'][camel(config_param)] = []
+ raw_value = module.params.get('config')[config_param]
+ if isinstance(raw_value, bool):
+ value = str(raw_value).lower()
+ else:
+ value = str(raw_value)
+
+ changeset['config'][camel(config_param)].append(value)
+ else:
+ # No need for camelcase in here as these are one word parameters
+ new_param_value = module.params.get(component_param)
+ changeset[camel(component_param)] = new_param_value
+
+ # As provider_type is not a module parameter we have to add it to the
+ # changeset explicitly.
+ changeset['providerType'] = provider_type
+
+    # Make a deep copy of the changeset. This is used when determining
+ # changes to the current state.
+ changeset_copy = deepcopy(changeset)
+
+ # It is not possible to compare current keys to desired keys, because the
+ # certificate parameter is a base64-encoded binary blob created on the fly
+ # when a key is added. Moreover, the Keycloak Admin API does not seem to
+ # return the value of the private key for comparison. So, in effect, we
+ # just have to ignore changes to the keys. However, as the privateKey
+ # parameter needs to be present in the JSON payload, any changes done to any
+ # other parameters (e.g. config.priority) will trigger update of the keys
+ # as a side-effect.
+ del changeset_copy['config']['privateKey']
+ del changeset_copy['config']['certificate']
+
+ # Make it easier to refer to current module parameters
+ name = module.params.get('name')
+ force = module.params.get('force')
+ state = module.params.get('state')
+ enabled = module.params.get('enabled')
+ provider_id = module.params.get('provider_id')
+ parent_id = module.params.get('parent_id')
+
+ # Get a list of all Keycloak components that are of keyprovider type.
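+ # The resulting query is roughly
+ # type=org.keycloak.keys.KeyProvider&parent=<parent_id>.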
+ realm_keys = kc.get_components(urlencode(dict(type=provider_type, parent=parent_id)), parent_id)
+
+ # If this component is present, get its key ID. Confusingly, the key ID is
+ # also known as the Provider ID.
+ key_id = None
+
+ # Track individual parameter changes
+ changes = ""
+
+ # This tells Ansible whether the key was changed (added, removed, modified)
+ result['changed'] = False
+
+ # Loop through the list of components. If we encounter a component whose
+ # name matches the value of the name parameter then assume the key is
+ # already present.
+ for key in realm_keys:
+ if key['name'] == name:
+ key_id = key['id']
+ changeset['id'] = key_id
+ changeset_copy['id'] = key_id
+
+ # Compare top-level parameters
+ for param, value in changeset.items():
+ before_realm_key[param] = key[param]
+
+ if changeset_copy[param] != key[param] and param != 'config':
+ changes += "%s: %s -> %s, " % (param, key[param], changeset_copy[param])
+ result['changed'] = True
+
+ # Compare parameters under the "config" key
+ for p, v in changeset_copy['config'].items():
+ before_realm_key['config'][p] = key['config'][p]
+ if changeset_copy['config'][p] != key['config'][p]:
+ changes += "config.%s: %s -> %s, " % (p, key['config'][p], changeset_copy['config'][p])
+ result['changed'] = True
+
+ # Sanitize linefeeds for the privateKey. Without this the JSON payload
+ # will be invalid.
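+ # For example, a key passed on a single line containing literal '\n'
+ # sequences is turned into a proper multi-line PEM block here.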
+ changeset['config']['privateKey'][0] = changeset['config']['privateKey'][0].replace('\\n', '\n')
+ changeset['config']['certificate'][0] = changeset['config']['certificate'][0].replace('\\n', '\n')
+
+ # Check all the possible states of the resource and do what is needed to
+ # converge current state with desired state (create, update or delete
+ # the key).
+ if key_id and state == 'present':
+ if result['changed']:
+ if module._diff:
+ del before_realm_key['config']['privateKey']
+ del before_realm_key['config']['certificate']
+ result['diff'] = dict(before=before_realm_key, after=changeset_copy)
+
+ if module.check_mode:
+ result['msg'] = "Realm key %s would be changed: %s" % (name, changes.strip(", "))
+ else:
+ kc.update_component(changeset, parent_id)
+ result['msg'] = "Realm key %s changed: %s" % (name, changes.strip(", "))
+ elif not result['changed'] and force:
+ kc.update_component(changeset, parent_id)
+ result['changed'] = True
+ result['msg'] = "Realm key %s was forcibly updated" % (name)
+ else:
+ result['msg'] = "Realm key %s was in sync" % (name)
+
+ result['end_state'] = changeset_copy
+ elif key_id and state == 'absent':
+ if module._diff:
+ del before_realm_key['config']['privateKey']
+ del before_realm_key['config']['certificate']
+ result['diff'] = dict(before=before_realm_key, after={})
+
+ if module.check_mode:
+ result['changed'] = True
+ result['msg'] = "Realm key %s would be deleted" % (name)
+ else:
+ kc.delete_component(key_id, parent_id)
+ result['changed'] = True
+ result['msg'] = "Realm key %s deleted" % (name)
+
+ result['end_state'] = {}
+ elif not key_id and state == 'present':
+ if module._diff:
+ result['diff'] = dict(before={}, after=changeset_copy)
+
+ if module.check_mode:
+ result['changed'] = True
+ result['msg'] = "Realm key %s would be created" % (name)
+ else:
+ kc.create_component(changeset, parent_id)
+ result['changed'] = True
+ result['msg'] = "Realm key %s created" % (name)
+
+ result['end_state'] = changeset_copy
+ elif not key_id and state == 'absent':
+ result['changed'] = False
+ result['msg'] = "Realm key %s not present" % (name)
+ result['end_state'] = {}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_realm_rolemapping.py
new file mode 100644
index 000000000..693cf9894
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_realm_rolemapping.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_realm_rolemapping
+
+short_description: Allows administration of Keycloak realm role mappings into groups with the Keycloak API
+
+version_added: 8.2.0
+
+description:
+ - This module allows you to add, remove or modify Keycloak realm role
+ mappings into groups with the Keycloak REST API. It requires access to the
+ REST API via OpenID Connect; the user connecting and the client being used
+ must have the requisite access rights. In a default Keycloak installation,
+ admin-cli and an admin user would work, as would a separate client
+ definition with the scope tailored to your needs and a user having the
+ expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a group_rolemapping, where possible provide the role ID to the module. This avoids an extra
+ API lookup to translate the name into the role ID.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the realm_rolemapping.
+ - On C(present), the realm_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the realm_rolemapping will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this role_representation resides.
+ default: 'master'
+
+ group_name:
+ type: str
+ description:
+ - Name of the group to be mapped.
+ - This parameter is required unless O(gid) is provided (using O(gid) reduces the number of API calls).
+
+ parents:
+ type: list
+ description:
+ - List of parent groups for the group to handle, sorted top to bottom.
+ - >-
+ Set this if your group is a subgroup and you do not provide the GID in O(gid).
+ elements: dict
+ suboptions:
+ id:
+ type: str
+ description:
+ - Identify parent by ID.
+ - Needs fewer API calls than using O(parents[].name).
+ - A deep parent chain can be started at any point as long as the first given parent is an ID.
+ - Note that in principle both ID and name can be specified at the same time,
+ but the current implementation only uses one of them, with ID
+ being preferred.
+ name:
+ type: str
+ description:
+ - Identify parent by name.
+ - Needs more internal API calls than using O(parents[].id) to map names to IDs under the hood.
+ - When giving a parent chain with only names, it must be complete up to the top.
+ - Note that in principle both ID and name can be specified at the same time,
+ but the current implementation only uses one of them, with ID
+ being preferred.
+ gid:
+ type: str
+ description:
+ - ID of the group to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but
+ providing it will reduce the number of API calls required.
+
+ roles:
+ description:
+ - Roles to be mapped to the group.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ description:
+ - Name of the role_representation.
+ - This parameter is required only when creating or updating the role_representation.
+ id:
+ type: str
+ description:
+ - The unique identifier for this role_representation.
+ - This parameter is not required for updating or deleting a role_representation but
+ providing it will reduce the number of API calls required.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Gaëtan Daubresse (@Gaetan2907)
+ - Marius Huysamen (@mhuysamen)
+ - Alexander Groß (@agross)
+'''
+
+EXAMPLES = '''
+- name: Map a realm role to a group, authentication with credentials
+ community.general.keycloak_realm_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ group_name: group1
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Map a realm role to a group, authentication with token
+ community.general.keycloak_realm_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ state: present
+ group_name: group1
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Map a realm role to a subgroup, authentication with token
+ community.general.keycloak_realm_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ state: present
+ group_name: subgroup1
+ parents:
+ - name: parent-group
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Unmap realm role from a group
+ community.general.keycloak_realm_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: absent
+ group_name: group1
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role role1 assigned to group group1."
+
+proposed:
+ description: Representation of proposed realm role mapping.
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+
+existing:
+ description:
+ - Representation of existing realm role mapping.
+ - The sample is truncated.
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+
+end_state:
+ description:
+ - Representation of realm role mapping after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+ KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ roles_spec = dict(
+ name=dict(type='str'),
+ id=dict(type='str'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ gid=dict(type='str'),
+ group_name=dict(type='str'),
+ parents=dict(
+ type='list', elements='dict',
+ options=dict(
+ id=dict(type='str'),
+ name=dict(type='str')
+ ),
+ ),
+ roles=dict(type='list', elements='dict', options=roles_spec),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ gid = module.params.get('gid')
+ group_name = module.params.get('group_name')
+ roles = module.params.get('roles')
+ parents = module.params.get('parents')
+
+ # Check the parameters
+ if gid is None and group_name is None:
+ module.fail_json(msg='Either the `group_name` or `gid` has to be specified.')
+
+ # Get the potential missing parameters
+ if gid is None:
+ group_rep = kc.get_group_by_name(group_name, realm=realm, parents=parents)
+ if group_rep is not None:
+ gid = group_rep['id']
+ else:
+ module.fail_json(msg='Could not fetch group %s:' % group_name)
+ else:
+ group_rep = kc.get_group_by_groupid(gid, realm=realm)
+
+ if roles is None:
+ module.exit_json(msg="Nothing to do (no roles specified).")
+ else:
+ for role_index, role in enumerate(roles, start=0):
+ if role['name'] is None and role['id'] is None:
+ module.fail_json(msg='Either the `name` or `id` has to be specified on each role.')
+ # Fetch missing role_id
+ if role['id'] is None:
+ role_rep = kc.get_realm_role(role['name'], realm=realm)
+ if role_rep is not None:
+ role['id'] = role_rep['id']
+ else:
+ module.fail_json(msg='Could not fetch realm role %s by name:' % (role['name']))
+ # Fetch missing role_name
+ else:
+ for realm_role in kc.get_realm_roles(realm=realm):
+ if realm_role['id'] == role['id']:
+ role['name'] = realm_role['name']
+ break
+
+ if role['name'] is None:
+ module.fail_json(msg='Could not fetch realm role %s by ID' % (role['id']))
+
+ assigned_roles_before = group_rep.get('realmRoles', [])
+
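+ # The proposed state starts from the currently assigned realm roles and is
+ # adjusted below as roles are added or removed.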
+ result['existing'] = assigned_roles_before
+ result['proposed'] = list(assigned_roles_before) if assigned_roles_before else []
+
+ update_roles = []
+ for role_index, role in enumerate(roles, start=0):
+ # Fetch roles to assign if state present
+ if state == 'present':
+ if any(assigned == role['name'] for assigned in assigned_roles_before):
+ pass
+ else:
+ update_roles.append({
+ 'id': role['id'],
+ 'name': role['name'],
+ })
+ result['proposed'].append(role['name'])
+ # Fetch roles to remove if state absent
+ else:
+ if any(assigned == role['name'] for assigned in assigned_roles_before):
+ update_roles.append({
+ 'id': role['id'],
+ 'name': role['name'],
+ })
+ if role['name'] in result['proposed']: # Handle double removal
+ result['proposed'].remove(role['name'])
+
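+ # Only contact the API (and report a change) when there is at least one role
+ # to add or remove.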
+ if len(update_roles):
+ result['changed'] = True
+ if module._diff:
+ result['diff'] = dict(before=assigned_roles_before, after=result['proposed'])
+ if module.check_mode:
+ module.exit_json(**result)
+
+ if state == 'present':
+ # Assign roles
+ kc.add_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm)
+ result['msg'] = 'Realm roles %s assigned to groupId %s.' % (update_roles, gid)
+ else:
+ # Remove mapping of role
+ kc.delete_group_realm_rolemapping(gid=gid, role_rep=update_roles, realm=realm)
+ result['msg'] = 'Realm roles %s removed from groupId %s.' % (update_roles, gid)
+
+ if gid is None:
+ assigned_roles_after = kc.get_group_by_name(group_name, realm=realm, parents=parents).get('realmRoles', [])
+ else:
+ assigned_roles_after = kc.get_group_by_groupid(gid, realm=realm).get('realmRoles', [])
+ result['end_state'] = assigned_roles_after
+ module.exit_json(**result)
+ # Do nothing
+ else:
+ result['changed'] = False
+ result['msg'] = 'Nothing to do, roles %s are %s with group %s.' % (roles, 'mapped' if state == 'present' else 'not mapped', group_name)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_role.py b/ansible_collections/community/general/plugins/modules/keycloak_role.py
index bbec5f591..f3e01483f 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_role.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_role.py
@@ -40,8 +40,8 @@ options:
state:
description:
- State of the role.
- - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the role will be removed if it exists.
+ - On V(present), the role will be created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the role will be removed if it exists.
default: 'present'
type: str
choices:
@@ -77,6 +77,42 @@ options:
description:
- A dict of key/value pairs to set as custom attributes for the role.
- Values may be single values (e.g. a string) or a list of strings.
+ composite:
+ description:
+ - If V(true), the role is a composition of other realm and/or client roles.
+ default: false
+ type: bool
+ version_added: 7.1.0
+ composites:
+ description:
+ - List of roles to include in the composite realm role.
+ - If the composite role is a client role, the C(clientId) (not the technical ID of the client) must be specified.
+ default: []
+ type: list
+ elements: dict
+ version_added: 7.1.0
+ suboptions:
+ name:
+ description:
+ - Name of the role. This can be the name of a REALM role or a client role.
+ type: str
+ required: true
+ client_id:
+ description:
+ - Client ID if the role is a client role. Do not include this option for a REALM role.
+ - Use the client ID you can see in the Keycloak console, not the technical ID of the client.
+ type: str
+ required: false
+ aliases:
+ - clientId
+ state:
+ description:
+ - Create the composite if present, remove it if absent.
+ type: str
+ choices:
+ - present
+ - absent
+ default: present
extends_documentation_fragment:
- community.general.keycloak
@@ -198,8 +234,9 @@ end_state:
'''
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
- keycloak_argument_spec, get_token, KeycloakError
+ keycloak_argument_spec, get_token, KeycloakError, is_struct_included
from ansible.module_utils.basic import AnsibleModule
+import copy
def main():
@@ -210,6 +247,12 @@ def main():
"""
argument_spec = keycloak_argument_spec()
+ composites_spec = dict(
+ name=dict(type='str', required=True),
+ client_id=dict(type='str', aliases=['clientId'], required=False),
+ state=dict(type='str', default='present', choices=['present', 'absent'])
+ )
+
meta_args = dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
name=dict(type='str', required=True),
@@ -217,6 +260,8 @@ def main():
realm=dict(type='str', default='master'),
client_id=dict(type='str'),
attributes=dict(type='dict'),
+ composites=dict(type='list', default=[], options=composites_spec, elements='dict'),
+ composite=dict(type='bool', default=False),
)
argument_spec.update(meta_args)
@@ -250,7 +295,7 @@ def main():
# Filter and map the parameters names that apply to the role
role_params = [x for x in module.params
- if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id'] and
module.params.get(x) is not None]
# See if it already exists in Keycloak
@@ -269,10 +314,10 @@ def main():
new_param_value = module.params.get(param)
old_value = before_role[param] if param in before_role else None
if new_param_value != old_value:
- changeset[camel(param)] = new_param_value
+ changeset[camel(param)] = copy.deepcopy(new_param_value)
# Prepare the desired values using the existing values (non-existence results in a dict that is save to use as a basis)
- desired_role = before_role.copy()
+ desired_role = copy.deepcopy(before_role)
desired_role.update(changeset)
result['proposed'] = changeset
@@ -309,6 +354,9 @@ def main():
kc.create_client_role(desired_role, clientid, realm)
after_role = kc.get_client_role(name, clientid, realm)
+ if after_role['composite']:
+ after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm)
+
result['end_state'] = after_role
result['msg'] = 'Role {name} has been created'.format(name=name)
@@ -316,10 +364,25 @@ def main():
else:
if state == 'present':
+ compare_exclude = []
+ if 'composites' in desired_role and isinstance(desired_role['composites'], list) and len(desired_role['composites']) > 0:
+ composites = kc.get_role_composites(rolerep=before_role, clientid=clientid, realm=realm)
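+ # Normalize the existing composites into the module's format
+ # (name / client_id / state) so they can be compared with the desired composites.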
+ before_role['composites'] = []
+ for composite in composites:
+ before_composite = {}
+ if composite['clientRole']:
+ composite_client = kc.get_client_by_id(id=composite['containerId'], realm=realm)
+ before_composite['client_id'] = composite_client['clientId']
+ else:
+ before_composite['client_id'] = None
+ before_composite['name'] = composite['name']
+ before_composite['state'] = 'present'
+ before_role['composites'].append(before_composite)
+ else:
+ compare_exclude.append('composites')
# Process an update
-
# no changes
- if desired_role == before_role:
+ if is_struct_included(desired_role, before_role, exclude=compare_exclude):
result['changed'] = False
result['end_state'] = desired_role
result['msg'] = "No changes required to role {name}.".format(name=name)
@@ -341,6 +404,8 @@ def main():
else:
kc.update_client_role(desired_role, clientid, realm)
after_role = kc.get_client_role(name, clientid, realm)
+ if after_role['composite']:
+ after_role['composites'] = kc.get_role_composites(rolerep=after_role, clientid=clientid, realm=realm)
result['end_state'] = after_role
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user.py b/ansible_collections/community/general/plugins/modules/keycloak_user.py
new file mode 100644
index 000000000..1aeff0da5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_user.py
@@ -0,0 +1,542 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, INSPQ (@elfelip)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: keycloak_user
+short_description: Create and configure a user in Keycloak
+description:
+ - This module creates, removes, or updates Keycloak users.
+version_added: 7.1.0
+options:
+ auth_username:
+ aliases: []
+ realm:
+ description:
+ - The name of the realm in which the user resides.
+ default: master
+ type: str
+ username:
+ description:
+ - Username for the user.
+ required: true
+ type: str
+ id:
+ description:
+ - ID of the user on the Keycloak server if known.
+ type: str
+ enabled:
+ description:
+ - Whether the user is enabled.
+ type: bool
+ email_verified:
+ description:
+ - Whether the user email address is verified.
+ default: false
+ type: bool
+ aliases:
+ - emailVerified
+ first_name:
+ description:
+ - The user's first name.
+ required: false
+ type: str
+ aliases:
+ - firstName
+ last_name:
+ description:
+ - The user's last name.
+ required: false
+ type: str
+ aliases:
+ - lastName
+ email:
+ description:
+ - User email.
+ required: false
+ type: str
+ federation_link:
+ description:
+ - Federation Link.
+ required: false
+ type: str
+ aliases:
+ - federationLink
+ service_account_client_id:
+ description:
+ - Client ID of the client application for which this user is the service account.
+ required: false
+ type: str
+ aliases:
+ - serviceAccountClientId
+ client_consents:
+ description:
+ - List of client consents granted to the user.
+ type: list
+ elements: dict
+ default: []
+ aliases:
+ - clientConsents
+ suboptions:
+ client_id:
+ description:
+ - Client ID of the client. Not the technical ID of the client.
+ type: str
+ required: true
+ aliases:
+ - clientId
+ roles:
+ description:
+ - List of client roles to assign to the user.
+ type: list
+ required: true
+ elements: str
+ groups:
+ description:
+ - List of groups for the user.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ name:
+ description:
+ - Name of the group.
+ type: str
+ state:
+ description:
+ - Control whether the user must be a member of this group or not.
+ choices: [ "present", "absent" ]
+ default: present
+ type: str
+ credentials:
+ description:
+ - User credentials.
+ default: []
+ type: list
+ elements: dict
+ suboptions:
+ type:
+ description:
+ - Credential type.
+ type: str
+ required: true
+ value:
+ description:
+ - Value of the credential.
+ type: str
+ required: true
+ temporary:
+ description:
+ - If V(true), the user is required to reset their credentials at next login.
+ type: bool
+ default: false
+ required_actions:
+ description:
+ - List of required actions for the user.
+ default: []
+ type: list
+ elements: str
+ aliases:
+ - requiredActions
+ federated_identities:
+ description:
+ - List of IDPs of the user.
+ default: []
+ type: list
+ elements: str
+ aliases:
+ - federatedIdentities
+ attributes:
+ description:
+ - List of user attributes.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the attribute.
+ type: str
+ values:
+ description:
+ - Values for the attribute as list.
+ type: list
+ elements: str
+ state:
+ description:
+ - Control whether the attribute must exist or not.
+ choices: [ "present", "absent" ]
+ default: present
+ type: str
+ access:
+ description:
+ - User access rights.
+ required: false
+ type: dict
+ disableable_credential_types:
+ description:
+ - List of credential types that can be disabled for the user.
+ default: []
+ type: list
+ elements: str
+ aliases:
+ - disableableCredentialTypes
+ origin:
+ description:
+ - User origin.
+ required: false
+ type: str
+ self:
+ description:
+ - User self administration.
+ required: false
+ type: str
+ state:
+ description:
+ - Control whether the user should exist or not.
+ choices: [ "present", "absent" ]
+ default: present
+ type: str
+ force:
+ description:
+ - If V(true), allows removing the user and recreating it.
+ type: bool
+ default: false
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+notes:
+ - The module does not modify the user ID of an existing user.
+author:
+ - Philippe Gauthier (@elfelip)
+'''
+
+EXAMPLES = '''
+- name: Create a user user1
+ community.general.keycloak_user:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: admin
+ auth_password: password
+ realm: master
+ username: user1
+ firstName: user1
+ lastName: user1
+ email: user1
+ enabled: true
+ emailVerified: false
+ credentials:
+ - type: password
+ value: password
+ temporary: false
+ attributes:
+ - name: attr1
+ values:
+ - value1
+ state: present
+ - name: attr2
+ values:
+ - value2
+ state: absent
+ groups:
+ - name: group1
+ state: present
+ state: present
+
+- name: Re-create a User
+ community.general.keycloak_user:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: admin
+ auth_password: password
+ realm: master
+ username: user1
+ firstName: user1
+ lastName: user1
+ email: user1
+ enabled: true
+ emailVerified: false
+ credentials:
+ - type: password
+ value: password
+ temporary: false
+ attributes:
+ - name: attr1
+ values:
+ - value1
+ state: present
+ - name: attr2
+ values:
+ - value2
+ state: absent
+ groups:
+ - name: group1
+ state: present
+ state: present
+
+- name: Re-create a User
+ community.general.keycloak_user:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: admin
+ auth_password: password
+ realm: master
+ username: user1
+ firstName: user1
+ lastName: user1
+ email: user1
+ enabled: true
+ emailVerified: false
+ credentials:
+ - type: password
+ value: password
+ temporary: false
+ attributes:
+ - name: attr1
+ values:
+ - value1
+ state: present
+ - name: attr2
+ values:
+ - value2
+ state: absent
+ groups:
+ - name: group1
+ state: present
+ state: present
+ force: true
+
+- name: Remove User
+ community.general.keycloak_user:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: admin
+ auth_password: password
+ realm: master
+ username: user1
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: User f18c709c-03d6-11ee-970b-c74bf2721112 created
+proposed:
+ description: Representation of the proposed user.
+ returned: on success
+ type: dict
+existing:
+ description: Representation of the existing user.
+ returned: on success
+ type: dict
+end_state:
+ description: Representation of the user after module execution.
+ returned: on success
+ type: dict
+changed:
+ description: Return V(true) if the operation changed the user on the Keycloak server, V(false) otherwise.
+ returned: always
+ type: bool
+'''
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError, is_struct_included
+from ansible.module_utils.basic import AnsibleModule
+import copy
+
+
+def main():
+ argument_spec = keycloak_argument_spec()
+ argument_spec['auth_username']['aliases'] = []
+ credential_spec = dict(
+ type=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ temporary=dict(type='bool', default=False)
+ )
+ client_consents_spec = dict(
+ client_id=dict(type='str', required=True, aliases=['clientId']),
+ roles=dict(type='list', elements='str', required=True)
+ )
+ attributes_spec = dict(
+ name=dict(type='str'),
+ values=dict(type='list', elements='str'),
+ state=dict(type='str', choices=['present', 'absent'], default='present')
+ )
+ groups_spec = dict(
+ name=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], default='present')
+ )
+ meta_args = dict(
+ realm=dict(type='str', default='master'),
+ self=dict(type='str'),
+ id=dict(type='str'),
+ username=dict(type='str', required=True),
+ first_name=dict(type='str', aliases=['firstName']),
+ last_name=dict(type='str', aliases=['lastName']),
+ email=dict(type='str'),
+ enabled=dict(type='bool'),
+ email_verified=dict(type='bool', default=False, aliases=['emailVerified']),
+ federation_link=dict(type='str', aliases=['federationLink']),
+ service_account_client_id=dict(type='str', aliases=['serviceAccountClientId']),
+ attributes=dict(type='list', elements='dict', options=attributes_spec),
+ access=dict(type='dict'),
+ groups=dict(type='list', default=[], elements='dict', options=groups_spec),
+ disableable_credential_types=dict(type='list', default=[], aliases=['disableableCredentialTypes'], elements='str'),
+ required_actions=dict(type='list', default=[], aliases=['requiredActions'], elements='str'),
+ credentials=dict(type='list', default=[], elements='dict', options=credential_spec),
+ federated_identities=dict(type='list', default=[], aliases=['federatedIdentities'], elements='str'),
+ client_consents=dict(type='list', default=[], aliases=['clientConsents'], elements='dict', options=client_consents_spec),
+ origin=dict(type='str'),
+ state=dict(choices=["absent", "present"], default='present'),
+ force=dict(type='bool', default=False),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ force = module.params.get('force')
+ username = module.params.get('username')
+ groups = module.params.get('groups')
+
+ # Filter and map the parameter names that apply to the user
+ user_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'force', 'groups'] and
+ module.params.get(x) is not None]
+
+ before_user = kc.get_user_by_username(username=username, realm=realm)
+
+ if before_user is None:
+ before_user = {}
+
+ changeset = {}
+
+ for param in user_params:
+ new_param_value = module.params.get(param)
+ if param == 'attributes' and param in before_user:
+ old_value = kc.convert_keycloak_user_attributes_dict_to_module_list(attributes=before_user['attributes'])
+ else:
+ old_value = before_user[param] if param in before_user else None
+ if new_param_value != old_value:
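+ # For attributes, carry over existing attributes that are not listed in the
+ # module parameters so that they are not dropped on update.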
+ if old_value is not None and param == 'attributes':
+ for old_attribute in old_value:
+ old_attribute_found = False
+ for new_attribute in new_param_value:
+ if new_attribute['name'] == old_attribute['name']:
+ old_attribute_found = True
+ if not old_attribute_found:
+ new_param_value.append(copy.deepcopy(old_attribute))
+ if isinstance(new_param_value, dict):
+ changeset[camel(param)] = copy.deepcopy(new_param_value)
+ else:
+ changeset[camel(param)] = new_param_value
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_user = copy.deepcopy(before_user)
+ desired_user.update(changeset)
+
+ result['proposed'] = changeset
+ result['existing'] = before_user
+
+ changed = False
+
+ # Cater for when it doesn't exist (an empty dict)
+ if state == 'absent':
+ if not before_user:
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'User does not exist, doing nothing.'
+ module.exit_json(**result)
+ else:
+ # Delete user
+ kc.delete_user(user_id=before_user['id'], realm=realm)
+ result["msg"] = 'User %s deleted' % (before_user['username'])
+ changed = True
+
+ else:
+ after_user = {}
+ if force and before_user: # If the force option is set to true
+ # Delete the existing user
+ kc.delete_user(user_id=before_user["id"], realm=realm)
+
+ if not before_user or force:
+ # Process a creation
+ changed = True
+
+ if username is None:
+ module.fail_json(msg='username must be specified when creating a new user')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=desired_user)
+
+ if module.check_mode:
+ module.exit_json(**result)
+ # Create the user
+ after_user = kc.create_user(userrep=desired_user, realm=realm)
+ result["msg"] = 'User %s created' % (desired_user['username'])
+ # Add user ID to new representation
+ desired_user['id'] = after_user["id"]
+ else:
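+ # Fields that are server-managed or handled separately (for example groups)
+ # are left out of the comparison below.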
+ excludes = [
+ "access",
+ "notBefore",
+ "createdTimestamp",
+ "totp",
+ "credentials",
+ "disableableCredentialTypes",
+ "groups",
+ "clientConsents",
+ "federatedIdentities",
+ "requiredActions"]
+ # Add user ID to new representation
+ desired_user['id'] = before_user["id"]
+
+ # Compare users
+ if not (is_struct_included(desired_user, before_user, excludes)): # If the desired user introduces changes compared to the existing user
+ # Update the user
+ after_user = kc.update_user(userrep=desired_user, realm=realm)
+ changed = True
+
+ # set user groups
+ if kc.update_user_groups_membership(userrep=desired_user, groups=groups, realm=realm):
+ changed = True
+ # Get the user groups
+ after_user["groups"] = kc.get_user_groups(user_id=desired_user["id"], realm=realm)
+ result["end_state"] = after_user
+ if changed:
+ result["msg"] = 'User %s updated' % (desired_user['username'])
+ else:
+ result["msg"] = 'No changes made for user %s' % (desired_user['username'])
+
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
index c0dc5d271..fee0d1265 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
@@ -36,9 +36,9 @@ options:
state:
description:
- State of the user federation.
- - On C(present), the user federation will be created if it does not yet exist, or updated with
+ - On V(present), the user federation will be created if it does not yet exist, or updated with
the parameters you provide.
- - On C(absent), the user federation will be removed if it exists.
+ - On V(absent), the user federation will be removed if it exists.
default: 'present'
type: str
choices:
@@ -54,7 +54,7 @@ options:
id:
description:
- The unique ID for this user federation. If left empty, the user federation will be searched
- by its I(name).
+ by its O(name).
type: str
name:
@@ -64,18 +64,15 @@ options:
provider_id:
description:
- - Provider for this user federation.
+ - Provider for this user federation. Built-in providers are V(ldap), V(kerberos), and V(sssd).
+ Custom user storage providers can also be used.
aliases:
- providerId
type: str
- choices:
- - ldap
- - kerberos
- - sssd
provider_type:
description:
- - Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)).
+ - Component type for user federation (only supported value is V(org.keycloak.storage.UserStorageProvider)).
aliases:
- providerType
default: org.keycloak.storage.UserStorageProvider
@@ -91,10 +88,10 @@ options:
config:
description:
- Dict specifying the configuration options for the provider; the contents differ depending on
- the value of I(provider_id). Examples are given below for C(ldap), C(kerberos) and C(sssd).
+ the value of O(provider_id). Examples are given below for V(ldap), V(kerberos) and V(sssd).
It is easiest to obtain valid config values by dumping an already-existing user federation
- configuration through check-mode in the I(existing) field.
- - The value C(sssd) has been supported since community.general 4.2.0.
+ configuration through check-mode in the RV(existing) field.
+ - The value V(sssd) has been supported since community.general 4.2.0.
type: dict
suboptions:
enabled:
@@ -111,15 +108,15 @@ options:
importEnabled:
description:
- - If C(true), LDAP users will be imported into Keycloak DB and synced by the configured
+ - If V(true), LDAP users will be imported into Keycloak DB and synced by the configured
sync policies.
default: true
type: bool
editMode:
description:
- - C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP
- on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP.
+ - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data will be synced back to LDAP
+ on demand. V(UNSYNCED) means user data will be imported, but not synced back to LDAP.
type: str
choices:
- READ_ONLY
@@ -136,13 +133,13 @@ options:
vendor:
description:
- LDAP vendor (provider).
- - Use short name. For instance, write C(rhds) for "Red Hat Directory Server".
+ - Use short name. For instance, write V(rhds) for "Red Hat Directory Server".
type: str
usernameLDAPAttribute:
description:
- Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server
- vendors it can be C(uid). For Active directory it can be C(sAMAccountName) or C(cn).
+ vendors it can be V(uid). For Active directory it can be V(sAMAccountName) or V(cn).
The attribute should be filled for all LDAP user records you want to import from
LDAP to Keycloak.
type: str
@@ -151,15 +148,15 @@ options:
description:
- Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN.
Usually it's the same as Username LDAP attribute, however it is not required. For
- example for Active directory, it is common to use C(cn) as RDN attribute when
- username attribute might be C(sAMAccountName).
+ example for Active directory, it is common to use V(cn) as RDN attribute when
+ username attribute might be V(sAMAccountName).
type: str
uuidLDAPAttribute:
description:
- Name of LDAP attribute, which is used as unique object identifier (UUID) for objects
- in LDAP. For many LDAP server vendors, it is C(entryUUID); however some are different.
- For example for Active directory it should be C(objectGUID). If your LDAP server does
+ in LDAP. For many LDAP server vendors, it is V(entryUUID); however some are different.
+ For example for Active directory it should be V(objectGUID). If your LDAP server does
not support the notion of UUID, you can use any other attribute that is supposed to
be unique among LDAP users in tree.
type: str
@@ -167,7 +164,7 @@ options:
userObjectClasses:
description:
- All values of LDAP objectClass attribute for users in LDAP divided by comma.
- For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users
+ For example V(inetOrgPerson, organizationalPerson). Newly created Keycloak users
will be written to LDAP with all those object classes and existing LDAP user records
are found just if they contain all those object classes.
type: str
@@ -251,8 +248,8 @@ options:
useTruststoreSpi:
description:
- Specifies whether LDAP connection will use the truststore SPI with the truststore
- configured in standalone.xml/domain.xml. C(Always) means that it will always use it.
- C(Never) means that it will not use it. C(Only for ldaps) means that it will use if
+ configured in standalone.xml/domain.xml. V(always) means that it will always use it.
+ V(never) means that it will not use it. V(ldapsOnly) means that it will use if
your connection URL use ldaps. Note even if standalone.xml/domain.xml is not
configured, the default Java cacerts or certificate specified by
C(javax.net.ssl.trustStore) property will be used.
@@ -297,7 +294,7 @@ options:
connectionPoolingDebug:
description:
- A string that indicates the level of debug output to produce. Example valid values are
- C(fine) (trace connection creation and removal) and C(all) (all debugging information).
+ V(fine) (trace connection creation and removal) and V(all) (all debugging information).
type: str
connectionPoolingInitSize:
@@ -321,7 +318,7 @@ options:
connectionPoolingProtocol:
description:
- A list of space-separated protocol types of connections that may be pooled.
- Valid types are C(plain) and C(ssl).
+ Valid types are V(plain) and V(ssl).
type: str
connectionPoolingTimeout:
@@ -342,17 +339,27 @@ options:
- Name of kerberos realm.
type: str
+ krbPrincipalAttribute:
+ description:
+ - Name of the LDAP attribute, which refers to the Kerberos principal.
+ This is used to look up the appropriate LDAP user after successful Kerberos/SPNEGO authentication in Keycloak.
+ When this is empty, the LDAP user will be looked up based on the LDAP username corresponding
+ to the first part of their Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG),
+ it will assume that the LDAP username is V(john).
+ type: str
+ version_added: 8.1.0
+
serverPrincipal:
description:
- Full name of server principal for HTTP service including server and domain name. For
- example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the
+ example V(HTTP/host.foo.org@FOO.ORG). Use V(*) to accept any service principal in the
KeyTab file.
type: str
keyTab:
description:
- Location of Kerberos KeyTab file containing the credentials of server principal. For
- example C(/etc/krb5.keytab).
+ example V(/etc/krb5.keytab).
type: str
debug:
@@ -451,7 +458,7 @@ options:
providerId:
description:
- - The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)).
+ - The mapper type for this mapper (for instance V(user-attribute-ldap-mapper)).
type: str
providerType:
@@ -464,6 +471,7 @@ options:
description:
- Dict specifying the configuration options for the mapper; the contents differ
depending on the value of I(identityProviderMapper).
+ # TODO: what is identityProviderMapper above???
type: dict
extends_documentation_fragment:
@@ -763,6 +771,7 @@ def main():
readTimeout=dict(type='int'),
searchScope=dict(type='str', choices=['1', '2'], default='1'),
serverPrincipal=dict(type='str'),
+ krbPrincipalAttribute=dict(type='str'),
startTls=dict(type='bool', default=False),
syncRegistrations=dict(type='bool', default=False),
trustEmail=dict(type='bool', default=False),
@@ -793,7 +802,7 @@ def main():
realm=dict(type='str', default='master'),
id=dict(type='str'),
name=dict(type='str'),
- provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos', 'sssd']),
+ provider_id=dict(type='str', aliases=['providerId']),
provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'),
parent_id=dict(type='str', aliases=['parentId']),
mappers=dict(type='list', elements='dict', options=mapper_spec),
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py
index d754e313a..59727a346 100644
--- a/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py
+++ b/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py
@@ -42,8 +42,8 @@ options:
state:
description:
- State of the user_rolemapping.
- - On C(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
- - On C(absent), the user_rolemapping will be removed if it exists.
+ - On V(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the user_rolemapping will be removed if it exists.
default: 'present'
type: str
choices:
@@ -79,8 +79,8 @@ options:
client_id:
type: str
description:
- - Name of the client to be mapped (different than I(cid)).
- - This parameter is required if I(cid) is not provided (can be replaced by I(cid)
+ - Name of the client to be mapped (different than O(cid)).
+ - This parameter is required if O(cid) is not provided (can be replaced by O(cid)
to reduce the number of API calls that must be made).
cid:
diff --git a/ansible_collections/community/general/plugins/modules/keyring.py b/ansible_collections/community/general/plugins/modules/keyring.py
index ada22ed58..8329b727b 100644
--- a/ansible_collections/community/general/plugins/modules/keyring.py
+++ b/ansible_collections/community/general/plugins/modules/keyring.py
@@ -106,7 +106,7 @@ def del_passphrase(module):
try:
keyring.delete_password(module.params["service"], module.params["username"])
return None
- except keyring.errors.KeyringLocked as keyring_locked_err: # pylint: disable=unused-variable
+ except keyring.errors.KeyringLocked:
delete_argument = (
'echo "%s" | gnome-keyring-daemon --unlock\nkeyring del %s %s\n'
% (
@@ -140,7 +140,7 @@ def set_passphrase(module):
module.params["user_password"],
)
return None
- except keyring.errors.KeyringLocked as keyring_locked_err: # pylint: disable=unused-variable
+ except keyring.errors.KeyringLocked:
set_argument = (
'echo "%s" | gnome-keyring-daemon --unlock\nkeyring set %s %s\n%s\n'
% (
diff --git a/ansible_collections/community/general/plugins/modules/kibana_plugin.py b/ansible_collections/community/general/plugins/modules/kibana_plugin.py
index a52eda2fd..f6744b396 100644
--- a/ansible_collections/community/general/plugins/modules/kibana_plugin.py
+++ b/ansible_collections/community/general/plugins/modules/kibana_plugin.py
@@ -60,7 +60,7 @@ options:
version:
description:
- Version of the plugin to be installed.
- - If plugin exists with previous version, plugin will NOT be updated unless C(force) is set to yes.
+ - If plugin exists with previous version, plugin will B(not) be updated unless O(force) is set to V(true).
type: str
force:
description:
diff --git a/ansible_collections/community/general/plugins/modules/launchd.py b/ansible_collections/community/general/plugins/modules/launchd.py
index 13a8ce086..e5942ea7c 100644
--- a/ansible_collections/community/general/plugins/modules/launchd.py
+++ b/ansible_collections/community/general/plugins/modules/launchd.py
@@ -32,14 +32,14 @@ options:
required: true
state:
description:
- - C(started)/C(stopped) are idempotent actions that will not run
+ - V(started)/V(stopped) are idempotent actions that will not run
commands unless necessary.
- - Launchd does not support C(restarted) nor C(reloaded) natively.
+ - Launchd does not support V(restarted) nor V(reloaded) natively.
These will trigger a stop/start (restarted) or an unload/load
(reloaded).
- - C(restarted) unloads and loads the service before start to ensure
+ - V(restarted) unloads and loads the service before start to ensure
that the latest job definition (plist) is used.
- - C(reloaded) unloads and loads the service to ensure that the latest
+ - V(reloaded) unloads and loads the service to ensure that the latest
job definition (plist) is used. Whether a service is started or
stopped depends on the content of the definition file.
type: str
@@ -54,7 +54,7 @@ options:
- Whether the service should not be restarted automatically by launchd.
- Services might have the 'KeepAlive' attribute set to true in a launchd configuration.
In case this is set to true, stopping a service will cause that launchd starts the service again.
- - Set this option to C(true) to let this module change the 'KeepAlive' attribute to false.
+ - Set this option to V(true) to let this module change the 'KeepAlive' attribute to V(false).
type: bool
default: false
notes:
diff --git a/ansible_collections/community/general/plugins/modules/layman.py b/ansible_collections/community/general/plugins/modules/layman.py
index 940ac30d1..13d514274 100644
--- a/ansible_collections/community/general/plugins/modules/layman.py
+++ b/ansible_collections/community/general/plugins/modules/layman.py
@@ -19,7 +19,6 @@ description:
- Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux.
Please note that Layman must be installed on a managed node prior using this module.
requirements:
- - "python >= 2.6"
- layman python module
extends_documentation_fragment:
- community.general.attributes
@@ -32,27 +31,27 @@ options:
name:
description:
- The overlay id to install, synchronize, or uninstall.
- Use 'ALL' to sync all of the installed overlays (can be used only when I(state=updated)).
+ Use 'ALL' to sync all of the installed overlays (can be used only when O(state=updated)).
required: true
type: str
list_url:
description:
- An URL of the alternative overlays list that defines the overlay to install.
- This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where
- C(overlay_defs) is readed from the Layman's configuration.
+ This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+ C(overlay_defs) is read from Layman's configuration.
aliases: [url]
type: str
state:
description:
- - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ - Whether to install (V(present)), sync (V(updated)), or uninstall (V(absent)) the overlay.
default: present
choices: [present, absent, updated]
type: str
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be
- set to C(false) when no other option exists. Prior to 1.9.3 the code
- defaulted to C(false).
+ - If V(false), SSL certificates will not be validated. This should only be
+ set to V(false) when no other option exists. Prior to 1.9.3 the code
+ defaulted to V(false).
type: bool
default: true
'''
diff --git a/ansible_collections/community/general/plugins/modules/ldap_attrs.py b/ansible_collections/community/general/plugins/modules/ldap_attrs.py
index c2cac8644..7986833a6 100644
--- a/ansible_collections/community/general/plugins/modules/ldap_attrs.py
+++ b/ansible_collections/community/general/plugins/modules/ldap_attrs.py
@@ -25,10 +25,10 @@ notes:
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a cn=peercred,cn=external,cn=auth ACL
rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in I(bind_dn)
- and I(bind_pw).
- - For I(state=present) and I(state=absent), all value comparisons are
- performed on the server for maximum accuracy. For I(state=exact), values
+ a simple bind to access your server, pass the credentials in O(bind_dn)
+ and O(bind_pw).
+ - For O(state=present) and O(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For O(state=exact), values
have to be compared in Python, which obviously ignores LDAP matching
rules. This should work out in most cases, but it is theoretically
possible to see spurious changes when target and actual values are
@@ -44,7 +44,8 @@ attributes:
check_mode:
support: full
diff_mode:
- support: none
+ support: full
+ version_added: 8.5.0
options:
state:
required: false
@@ -52,11 +53,11 @@ options:
choices: [present, absent, exact]
default: present
description:
- - The state of the attribute values. If C(present), all given attribute
- values will be added if they're missing. If C(absent), all given
- attribute values will be removed if present. If C(exact), the set of
+ - The state of the attribute values. If V(present), all given attribute
+ values will be added if they're missing. If V(absent), all given
+ attribute values will be removed if present. If V(exact), the set of
attribute values will be forced to exactly those provided and no others.
- If I(state=exact) and the attribute I(value) is empty, all values for
+ If O(state=exact) and the attribute value is empty, all values for
this attribute will be removed.
attributes:
required: true
@@ -69,16 +70,16 @@ options:
readability for long string values by using YAML block modifiers as seen in the
examples for this module.
- Note that when using values that YAML/ansible-core interprets as other types,
- like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if
+ like V(yes), V(no) (booleans), or V(2.10) (float), make sure to quote them if
these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
ordered:
required: false
type: bool
default: false
description:
- - If C(true), prepend list values with X-ORDERED index numbers in all
+ - If V(true), prepend list values with X-ORDERED index numbers in all
attributes specified in the current task. This is useful mostly with
- I(olcAccess) attribute to easily manage LDAP Access Control Lists.
+ C(olcAccess) attribute to easily manage LDAP Access Control Lists.
extends_documentation_fragment:
- community.general.ldap.documentation
- community.general.attributes
@@ -182,7 +183,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text
-from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
import re
@@ -207,7 +208,7 @@ class LdapAttrs(LdapGeneric):
self.ordered = self.module.params['ordered']
def _order_values(self, values):
- """ Preprend X-ORDERED index numbers to attribute's values. """
+ """ Prepend X-ORDERED index numbers to attribute's values. """
ordered_values = []
if isinstance(values, list):
@@ -235,26 +236,38 @@ class LdapAttrs(LdapGeneric):
def add(self):
modlist = []
+ new_attrs = {}
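+ # Track the attributes that gain values so a diff can be reported.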
for name, values in self.module.params['attributes'].items():
norm_values = self._normalize_values(values)
+ added_values = []
for value in norm_values:
if self._is_value_absent(name, value):
modlist.append((ldap.MOD_ADD, name, value))
-
- return modlist
+ added_values.append(value)
+ if added_values:
+ new_attrs[name] = norm_values
+ return modlist, {}, new_attrs
def delete(self):
modlist = []
+ old_attrs = {}
+ new_attrs = {}
for name, values in self.module.params['attributes'].items():
norm_values = self._normalize_values(values)
+ removed_values = []
for value in norm_values:
if self._is_value_present(name, value):
+ removed_values.append(value)
modlist.append((ldap.MOD_DELETE, name, value))
-
- return modlist
+ if removed_values:
+ old_attrs[name] = norm_values
+ new_attrs[name] = [value for value in norm_values if value not in removed_values]
+ return modlist, old_attrs, new_attrs
def exact(self):
modlist = []
+ old_attrs = {}
+ new_attrs = {}
for name, values in self.module.params['attributes'].items():
norm_values = self._normalize_values(values)
try:
@@ -272,8 +285,13 @@ class LdapAttrs(LdapGeneric):
modlist.append((ldap.MOD_DELETE, name, None))
else:
modlist.append((ldap.MOD_REPLACE, name, norm_values))
+ old_attrs[name] = current
+ new_attrs[name] = norm_values
+ if len(current) == 1 and len(norm_values) == 1:
+ old_attrs[name] = current[0]
+ new_attrs[name] = norm_values[0]
- return modlist
+ return modlist, old_attrs, new_attrs
def _is_value_present(self, name, value):
""" True if the target attribute has the given value. """
@@ -300,6 +318,7 @@ def main():
state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
),
supports_check_mode=True,
+ required_together=ldap_required_together(),
)
if not HAS_LDAP:
@@ -308,16 +327,18 @@ def main():
# Instantiate the LdapAttr object
ldap = LdapAttrs(module)
+ old_attrs = None
+ new_attrs = None
state = module.params['state']
# Perform action
if state == 'present':
- modlist = ldap.add()
+ modlist, old_attrs, new_attrs = ldap.add()
elif state == 'absent':
- modlist = ldap.delete()
+ modlist, old_attrs, new_attrs = ldap.delete()
elif state == 'exact':
- modlist = ldap.exact()
+ modlist, old_attrs, new_attrs = ldap.exact()
changed = False
@@ -330,7 +351,7 @@ def main():
except Exception as e:
module.fail_json(msg="Attribute action failed.", details=to_native(e))
- module.exit_json(changed=changed, modlist=modlist)
+ module.exit_json(changed=changed, modlist=modlist, diff={"before": old_attrs, "after": new_attrs})
if __name__ == '__main__':
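
The add()/delete()/exact() changes above make each action return a (modlist, old_attrs, new_attrs) tuple so the module can emit --diff output alongside the modlist. Below is a minimal standalone sketch of the state=present case, assuming python-ldap is installed; present_modlist, current and desired are illustrative names and data, not the module's own code:

# Sketch: build a MOD_ADD modlist plus before/after dicts for diff output.
# Requires python-ldap for the MOD_* constants; current/desired are made-up data.
import ldap

def present_modlist(current, desired):
    """Return (modlist, before, after) for state=present semantics."""
    modlist, before, after = [], {}, {}
    for name, values in desired.items():
        have = set(current.get(name, []))
        missing = [v for v in values if v not in have]
        if missing:
            modlist.extend((ldap.MOD_ADD, name, v) for v in missing)
            before[name] = sorted(have)
            after[name] = sorted(have | set(missing))
    return modlist, before, after

current = {"memberUid": [b"alice"]}            # what the server currently holds
desired = {"memberUid": [b"alice", b"bob"]}    # what the task asks for
mods, before, after = present_modlist(current, desired)
print(mods)                                    # [(0, 'memberUid', b'bob')]
print({"before": before, "after": after})      # the shape fed into the new diff output
# A real module would then call connection.modify_ext_s(dn, mods) unless in check mode.
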
diff --git a/ansible_collections/community/general/plugins/modules/ldap_entry.py b/ansible_collections/community/general/plugins/modules/ldap_entry.py
index 619bbf927..5deaf7c4c 100644
--- a/ansible_collections/community/general/plugins/modules/ldap_entry.py
+++ b/ansible_collections/community/general/plugins/modules/ldap_entry.py
@@ -24,8 +24,8 @@ notes:
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a cn=peercred,cn=external,cn=auth ACL
rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in I(bind_dn)
- and I(bind_pw).
+ a simple bind to access your server, pass the credentials in O(bind_dn)
+ and O(bind_pw).
author:
- Jiri Tyr (@jtyr)
requirements:
@@ -38,7 +38,7 @@ attributes:
options:
attributes:
description:
- - If I(state=present), attributes necessary to create an entry. Existing
+ - If O(state=present), attributes necessary to create an entry. Existing
entries are never modified. To assert specific attribute values on an
existing entry, use M(community.general.ldap_attrs) module instead.
- Each attribute value can be a string for single-valued attributes or
@@ -47,13 +47,13 @@ options:
readability for long string values by using YAML block modifiers as seen in the
examples for this module.
- Note that when using values that YAML/ansible-core interprets as other types,
- like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if
+ like V(yes), V(no) (booleans), or V(2.10) (float), make sure to quote them if
these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
type: dict
default: {}
objectClass:
description:
- - If I(state=present), value or list of values to use when creating
+ - If O(state=present), value or list of values to use when creating
the entry. It can either be a string or an actual list of
strings.
type: list
@@ -66,7 +66,7 @@ options:
type: str
recursive:
description:
- - If I(state=delete), a flag indicating whether a single entry or the
+ - If O(state=delete), a flag indicating whether a single entry or the
whole branch must be deleted.
type: bool
default: false
@@ -151,7 +151,7 @@ import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native, to_bytes
-from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
LDAP_IMP_ERR = None
try:
@@ -213,7 +213,7 @@ class LdapEntry(LdapGeneric):
self.connection.delete_s(self.dn)
def _delete_recursive():
- """ Attempt recurive deletion using the subtree-delete control.
+ """ Attempt recursive deletion using the subtree-delete control.
If that fails, do it manually. """
try:
subtree_delete = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805')
@@ -255,6 +255,7 @@ def main():
),
required_if=[('state', 'present', ['objectClass'])],
supports_check_mode=True,
+ required_together=ldap_required_together(),
)
if not HAS_LDAP:
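
The docstring fix above touches _delete_recursive(), which first tries the LDAP subtree-delete control before falling back to manual deletion. A minimal python-ldap sketch of that first attempt, assuming an already-bound connection; conn, dn and delete_subtree are placeholders rather than the module's implementation:

# Sketch: recursive delete via the subtree-delete control (OID 1.2.840.113556.1.4.805).
# Requires python-ldap; conn is assumed to be an already-bound LDAPObject, dn a placeholder.
import ldap
import ldap.controls

def delete_subtree(conn, dn):
    ctrl = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805')
    try:
        conn.delete_ext_s(dn, serverctrls=[ctrl])
    except ldap.LDAPError:
        # The server rejected the control; a real implementation falls back to
        # deleting leaf entries bottom-up, which is what the module does.
        raise
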
diff --git a/ansible_collections/community/general/plugins/modules/ldap_passwd.py b/ansible_collections/community/general/plugins/modules/ldap_passwd.py
index f47fa330e..5044586b0 100644
--- a/ansible_collections/community/general/plugins/modules/ldap_passwd.py
+++ b/ansible_collections/community/general/plugins/modules/ldap_passwd.py
@@ -20,10 +20,10 @@ description:
notes:
- The default authentication settings will attempt to use a SASL EXTERNAL
bind over a UNIX domain socket. This works well with the default Ubuntu
- install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in I(bind_dn)
- and I(bind_pw).
+ a simple bind to access your server, pass the credentials in O(bind_dn)
+ and O(bind_pw).
author:
- Keller Fuchs (@KellerFuchs)
requirements:
@@ -36,7 +36,7 @@ attributes:
options:
passwd:
description:
- - The (plaintext) password to be set for I(dn).
+ - The (plaintext) password to be set for O(dn).
type: str
extends_documentation_fragment:
- community.general.ldap.documentation
@@ -72,7 +72,7 @@ modlist:
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
LDAP_IMP_ERR = None
try:
@@ -133,6 +133,7 @@ def main():
module = AnsibleModule(
argument_spec=gen_specs(passwd=dict(no_log=True)),
supports_check_mode=True,
+ required_together=ldap_required_together(),
)
if not HAS_LDAP:
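
This hunk only wires ldap_required_together() into the module spec; the helper's definition is not shown in this patch, but it presumably pairs the bind credentials (for example [['bind_dn', 'bind_pw']] — an assumption). For the operation the module ultimately performs, here is a hedged sketch of an RFC 3062 password modify with python-ldap, where conn, dn and newpw are placeholders:

# Sketch: RFC 3062 password modify with python-ldap, roughly the operation ldap_passwd drives.
# conn is assumed to be an already-bound LDAPObject; dn and newpw are placeholders.
import ldap

def set_password(conn, dn, newpw):
    # passwd_s(user, oldpw, newpw); oldpw can be None when the bound identity has enough rights.
    conn.passwd_s(dn, None, newpw)
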
diff --git a/ansible_collections/community/general/plugins/modules/ldap_search.py b/ansible_collections/community/general/plugins/modules/ldap_search.py
index ad79a2d73..45744e634 100644
--- a/ansible_collections/community/general/plugins/modules/ldap_search.py
+++ b/ansible_collections/community/general/plugins/modules/ldap_search.py
@@ -21,8 +21,8 @@ notes:
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
rule allowing root to modify the server configuration. If you need to use
- a simple bind to access your server, pass the credentials in I(bind_dn)
- and I(bind_pw).
+ a simple bind to access your server, pass the credentials in O(bind_dn)
+ and O(bind_pw).
author:
- Sebastian Pfahl (@eryx12o45)
requirements:
@@ -59,8 +59,27 @@ options:
default: false
type: bool
description:
- - Set to C(true) to return the full attribute schema of entries, not
- their attribute values. Overrides I(attrs) when provided.
+ - Set to V(true) to return the full attribute schema of entries, not
+ their attribute values. Overrides O(attrs) when provided.
+ page_size:
+ default: 0
+ type: int
+ description:
+ - The page size when performing a simple paged result search (RFC 2696).
+ This setting can be tuned to reduce issues with timeouts and server limits.
+ - Setting the page size to V(0) (default) disables paged searching.
+ version_added: 7.1.0
+ base64_attributes:
+ description:
+ - If provided, all attribute values returned that are listed in this option
+ will be Base64 encoded.
+ - If the special value V(*) appears in this list, all attributes will be
+ Base64 encoded.
+ - All other attribute values will be converted to UTF-8 strings. If they
+ contain binary data, please note that invalid UTF-8 bytes will be replaced.
+ type: list
+ elements: str
+ version_added: 7.0.0
extends_documentation_fragment:
- community.general.ldap.documentation
- community.general.attributes
@@ -81,11 +100,28 @@ EXAMPLES = r"""
register: ldap_group_gids
"""
+RESULTS = """
+results:
+ description:
+ - For every entry found, one dictionary will be returned.
+ - Every dictionary contains a key C(dn) with the entry's DN as a value.
+ - Every attribute of the entry found is added to the dictionary. If the key
+ has precisely one value, that value is taken directly, otherwise the key's
+ value is a list.
+ - Note that all values (for single-element lists) and list elements (for multi-valued
+ lists) will be UTF-8 strings. Some might contain Base64-encoded binary data; which
+ ones is determined by the O(base64_attributes) option.
+ type: list
+ elements: dict
+"""
+
+import base64
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.six import binary_type, string_types, text_type
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs, ldap_required_together
LDAP_IMP_ERR = None
try:
@@ -105,8 +141,11 @@ def main():
filter=dict(type='str', default='(objectClass=*)'),
attrs=dict(type='list', elements='str'),
schema=dict(type='bool', default=False),
+ page_size=dict(type='int', default=0),
+ base64_attributes=dict(type='list', elements='str'),
),
supports_check_mode=True,
+ required_together=ldap_required_together(),
)
if not HAS_LDAP:
@@ -118,16 +157,30 @@ def main():
except Exception as exception:
module.fail_json(msg="Attribute action failed.", details=to_native(exception))
- module.exit_json(changed=False)
+
+def _normalize_string(val, convert_to_base64):
+ if isinstance(val, (string_types, binary_type)):
+ if isinstance(val, text_type):
+ val = to_bytes(val, encoding='utf-8')
+ if convert_to_base64:
+ val = to_text(base64.b64encode(val))
+ else:
+ # See https://github.com/ansible/ansible/issues/80258#issuecomment-1477038952 for details.
+ # We want to make sure that all strings are properly UTF-8 encoded, even if they were not,
+ # or happened to be byte strings.
+ val = to_text(val, 'utf-8', errors='replace')
+ # See also https://github.com/ansible-collections/community.general/issues/5704.
+ return val
-def _extract_entry(dn, attrs):
+def _extract_entry(dn, attrs, base64_attributes):
extracted = {'dn': dn}
for attr, val in list(attrs.items()):
+ convert_to_base64 = '*' in base64_attributes or attr in base64_attributes
if len(val) == 1:
- extracted[attr] = val[0]
+ extracted[attr] = _normalize_string(val[0], convert_to_base64)
else:
- extracted[attr] = val
+ extracted[attr] = [_normalize_string(v, convert_to_base64) for v in val]
return extracted
@@ -137,12 +190,14 @@ class LdapSearch(LdapGeneric):
self.filterstr = self.module.params['filter']
self.attrlist = []
+ self.page_size = self.module.params['page_size']
self._load_scope()
self._load_attrs()
self._load_schema()
+ self._base64_attributes = set(self.module.params['base64_attributes'] or [])
def _load_schema(self):
- self.schema = self.module.boolean(self.module.params['schema'])
+ self.schema = self.module.params['schema']
if self.schema:
self.attrsonly = 1
else:
@@ -165,22 +220,32 @@ class LdapSearch(LdapGeneric):
self.module.exit_json(changed=False, results=results)
def perform_search(self):
+ ldap_entries = []
+ controls = []
+ if self.page_size > 0:
+ controls.append(ldap.controls.libldap.SimplePagedResultsControl(True, size=self.page_size, cookie=''))
try:
- results = self.connection.search_s(
- self.dn,
- self.scope,
- filterstr=self.filterstr,
- attrlist=self.attrlist,
- attrsonly=self.attrsonly
- )
- ldap_entries = []
- for result in results:
- if isinstance(result[1], dict):
- if self.schema:
- ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys())))
- else:
- ldap_entries.append(_extract_entry(result[0], result[1]))
- return ldap_entries
+ while True:
+ response = self.connection.search_ext(
+ self.dn,
+ self.scope,
+ filterstr=self.filterstr,
+ attrlist=self.attrlist,
+ attrsonly=self.attrsonly,
+ serverctrls=controls,
+ )
+ rtype, results, rmsgid, serverctrls = self.connection.result3(response)
+ for result in results:
+ if isinstance(result[1], dict):
+ if self.schema:
+ ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys())))
+ else:
+ ldap_entries.append(_extract_entry(result[0], result[1], self._base64_attributes))
+ cookies = [c.cookie for c in serverctrls if c.controlType == ldap.controls.libldap.SimplePagedResultsControl.controlType]
+ if self.page_size > 0 and cookies and cookies[0]:
+ controls[0].cookie = cookies[0]
+ else:
+ return ldap_entries
except ldap.NO_SUCH_OBJECT:
self.module.fail_json(msg="Base not found: {0}".format(self.dn))
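
The rewritten perform_search() above pages through results with the RFC 2696 simple paged results control. The same loop as a standalone python-ldap sketch, assuming a bound connection conn; base, filterstr and page_size are illustrative arguments:

# Sketch: simple paged result search (RFC 2696) with python-ldap.
# conn/base/filterstr are placeholders; page_size mirrors the new module option.
import ldap
from ldap.controls.libldap import SimplePagedResultsControl

def paged_search(conn, base, filterstr="(objectClass=*)", page_size=500):
    entries = []
    ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
    while True:
        msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr, serverctrls=[ctrl])
        rtype, results, rmsgid, serverctrls = conn.result3(msgid)
        entries.extend(results)
        cookies = [c.cookie for c in serverctrls
                   if c.controlType == SimplePagedResultsControl.controlType]
        if cookies and cookies[0]:
            ctrl.cookie = cookies[0]   # server returned a cookie: more pages to fetch
        else:
            return entries
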
diff --git a/ansible_collections/community/general/plugins/modules/linode.py b/ansible_collections/community/general/plugins/modules/linode.py
index 404e7a393..9e04ac63d 100644
--- a/ansible_collections/community/general/plugins/modules/linode.py
+++ b/ansible_collections/community/general/plugins/modules/linode.py
@@ -31,7 +31,7 @@ options:
api_key:
description:
- Linode API key.
- - C(LINODE_API_KEY) env variable can be used instead.
+ - E(LINODE_API_KEY) environment variable can be used instead.
type: str
required: true
name:
@@ -124,7 +124,7 @@ options:
private_ip:
description:
- Add private IPv4 address when Linode is created.
- - Default is C(false).
+ - Default is V(false).
type: bool
ssh_pub_key:
description:
@@ -149,7 +149,7 @@ options:
type: int
wait:
description:
- - wait for the instance to be in state C(running) before returning
+ - wait for the instance to be in state V(running) before returning
type: bool
default: true
wait_timeout:
@@ -163,7 +163,6 @@ options:
type: bool
default: true
requirements:
- - python >= 2.6
- linode-python
author:
- Vincent Viallet (@zbal)
diff --git a/ansible_collections/community/general/plugins/modules/linode_v4.py b/ansible_collections/community/general/plugins/modules/linode_v4.py
index f213af125..da885f3a5 100644
--- a/ansible_collections/community/general/plugins/modules/linode_v4.py
+++ b/ansible_collections/community/general/plugins/modules/linode_v4.py
@@ -14,7 +14,6 @@ module: linode_v4
short_description: Manage instances on the Linode cloud
description: Manage instances on the Linode cloud.
requirements:
- - python >= 2.7
- linode_api4 >= 2.0.0
author:
- Luke Murphy (@decentral1se)
@@ -62,7 +61,7 @@ options:
type: str
private_ip:
description:
- - If C(true), the created Linode will have private networking enabled and
+ - If V(true), the created Linode will have private networking enabled and
assigned a private IPv4 address.
type: bool
default: false
@@ -95,7 +94,7 @@ options:
access_token:
description:
- The Linode API v4 access token. It may also be specified by exposing
- the C(LINODE_ACCESS_TOKEN) environment variable. See
+ the E(LINODE_ACCESS_TOKEN) environment variable. See
U(https://www.linode.com/docs/api#access-and-authentication).
required: true
type: str
diff --git a/ansible_collections/community/general/plugins/modules/listen_ports_facts.py b/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
index bc630e1d2..08030a8b3 100644
--- a/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
+++ b/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
@@ -40,7 +40,8 @@ options:
include_non_listening:
description:
- Show both listening and non-listening sockets (for TCP this means established connections).
- - Adds the return values C(state) and C(foreign_address) to the returned facts.
+ - Adds the return values RV(ansible_facts.tcp_listen[].state), RV(ansible_facts.udp_listen[].state),
+ RV(ansible_facts.tcp_listen[].foreign_address), and RV(ansible_facts.udp_listen[].foreign_address) to the returned facts.
type: bool
default: false
version_added: 5.4.0
@@ -96,13 +97,13 @@ ansible_facts:
sample: "0.0.0.0"
foreign_address:
description: The address of the remote end of the socket.
- returned: if I(include_non_listening=true)
+ returned: if O(include_non_listening=true)
type: str
sample: "10.80.0.1"
version_added: 5.4.0
state:
description: The state of the socket.
- returned: if I(include_non_listening=true)
+ returned: if O(include_non_listening=true)
type: str
sample: "ESTABLISHED"
version_added: 5.4.0
@@ -148,13 +149,13 @@ ansible_facts:
sample: "0.0.0.0"
foreign_address:
description: The address of the remote end of the socket.
- returned: if I(include_non_listening=true)
+ returned: if O(include_non_listening=true)
type: str
sample: "10.80.0.1"
version_added: 5.4.0
state:
description: The state of the socket. UDP is a connectionless protocol. Shows UCONN or ESTAB.
- returned: if I(include_non_listening=true)
+ returned: if O(include_non_listening=true)
type: str
sample: "UCONN"
version_added: 5.4.0
@@ -199,7 +200,7 @@ from ansible.module_utils.basic import AnsibleModule
def split_pid_name(pid_name):
"""
Split the entry PID/Program name into the PID (int) and the name (str)
- :param pid_name: PID/Program String seperated with a dash. E.g 51/sshd: returns pid = 51 and name = sshd
+ :param pid_name: PID/Program String separated with a slash. E.g. 51/sshd: returns pid = 51 and name = sshd
:return: PID (int) and the program name (str)
"""
try:
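
The docstring fix above belongs to split_pid_name(), which splits netstat's PID/Program name column. A small sketch of that parsing, under the assumption that the column is either "-" (no owning process) or "<pid>/<name>"; the helper below is illustrative, not the module's exact implementation:

# Sketch: split netstat's "PID/Program name" column, e.g. "51/sshd" -> (51, "sshd").
# Assumption: "-" (or anything unparsable) means the owning process is unknown.
def split_pid_name(pid_name):
    try:
        pid, name = pid_name.split("/", 1)
        return int(pid), name
    except ValueError:
        return 0, ""

print(split_pid_name("51/sshd"))   # (51, 'sshd')
print(split_pid_name("-"))         # (0, '')
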
diff --git a/ansible_collections/community/general/plugins/modules/locale_gen.py b/ansible_collections/community/general/plugins/modules/locale_gen.py
index fccdf977a..0dd76c9ab 100644
--- a/ansible_collections/community/general/plugins/modules/locale_gen.py
+++ b/ansible_collections/community/general/plugins/modules/locale_gen.py
@@ -35,6 +35,8 @@ options:
- Whether the locale shall be present.
choices: [ absent, present ]
default: present
+notes:
+ - This module does not support RHEL-based systems.
'''
EXAMPLES = '''
@@ -46,154 +48,31 @@ EXAMPLES = '''
import os
import re
-from subprocess import Popen, PIPE, call
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-LOCALE_NORMALIZATION = {
- ".utf8": ".UTF-8",
- ".eucjp": ".EUC-JP",
- ".iso885915": ".ISO-8859-15",
- ".cp1251": ".CP1251",
- ".koi8r": ".KOI8-R",
- ".armscii8": ".ARMSCII-8",
- ".euckr": ".EUC-KR",
- ".gbk": ".GBK",
- ".gb18030": ".GB18030",
- ".euctw": ".EUC-TW",
-}
-
-
-# ===========================================
-# location module specific support methods.
-#
-
-def is_available(name, ubuntuMode):
- """Check if the given locale is available on the system. This is done by
- checking either :
- * if the locale is present in /etc/locales.gen
- * or if the locale is present in /usr/share/i18n/SUPPORTED"""
- if ubuntuMode:
- __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
- __locales_available = '/usr/share/i18n/SUPPORTED'
- else:
- __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
- __locales_available = '/etc/locale.gen'
-
- re_compiled = re.compile(__regexp)
- fd = open(__locales_available, 'r')
- for line in fd:
- result = re_compiled.match(line)
- if result and result.group('locale') == name:
- return True
- fd.close()
- return False
-
-
-def is_present(name):
- """Checks if the given locale is currently installed."""
- output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
- output = to_native(output)
- return any(fix_case(name) == fix_case(line) for line in output.splitlines())
-
-
-def fix_case(name):
- """locale -a might return the encoding in either lower or upper case.
- Passing through this function makes them uniform for comparisons."""
- for s, r in LOCALE_NORMALIZATION.items():
- name = name.replace(s, r)
- return name
-
-
-def replace_line(existing_line, new_line):
- """Replaces lines in /etc/locale.gen"""
- try:
- f = open("/etc/locale.gen", "r")
- lines = [line.replace(existing_line, new_line) for line in f]
- finally:
- f.close()
- try:
- f = open("/etc/locale.gen", "w")
- f.write("".join(lines))
- finally:
- f.close()
-
-
-def set_locale(name, enabled=True):
- """ Sets the state of the locale. Defaults to enabled. """
- search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
- if enabled:
- new_string = r'%s \g<charset>' % (name)
- else:
- new_string = r'# %s \g<charset>' % (name)
- try:
- f = open("/etc/locale.gen", "r")
- lines = [re.sub(search_string, new_string, line) for line in f]
- finally:
- f.close()
- try:
- f = open("/etc/locale.gen", "w")
- f.write("".join(lines))
- finally:
- f.close()
-
-
-def apply_change(targetState, name):
- """Create or remove locale.
-
- Keyword arguments:
- targetState -- Desired state, either present or absent.
- name -- Name including encoding such as de_CH.UTF-8.
- """
- if targetState == "present":
- # Create locale.
- set_locale(name, enabled=True)
- else:
- # Delete locale.
- set_locale(name, enabled=False)
-
- localeGenExitValue = call("locale-gen")
- if localeGenExitValue != 0:
- raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))
-
-
-def apply_change_ubuntu(targetState, name):
- """Create or remove locale.
-
- Keyword arguments:
- targetState -- Desired state, either present or absent.
- name -- Name including encoding such as de_CH.UTF-8.
- """
- if targetState == "present":
- # Create locale.
- # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
- localeGenExitValue = call(["locale-gen", name])
- else:
- # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
- try:
- f = open("/var/lib/locales/supported.d/local", "r")
- content = f.readlines()
- finally:
- f.close()
- try:
- f = open("/var/lib/locales/supported.d/local", "w")
- for line in content:
- locale, charset = line.split(' ')
- if locale != name:
- f.write(line)
- finally:
- f.close()
- # Purge locales and regenerate.
- # Please provide a patch if you know how to avoid regenerating the locales to keep!
- localeGenExitValue = call(["locale-gen", "--purge"])
-
- if localeGenExitValue != 0:
- raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))
-
-def main():
- module = AnsibleModule(
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.mh.deco import check_mode_skip
+
+from ansible_collections.community.general.plugins.module_utils.locale_gen import locale_runner, locale_gen_runner
+
+
+class LocaleGen(StateModuleHelper):
+ LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+ }
+ LOCALE_GEN = "/etc/locale.gen"
+ LOCALE_SUPPORTED = "/var/lib/locales/supported.d/"
+
+ output_params = ["name"]
+ module = dict(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
@@ -201,42 +80,133 @@ def main():
supports_check_mode=True,
)
- name = module.params['name']
- state = module.params['state']
-
- if not os.path.exists("/var/lib/locales/supported.d/"):
- if os.path.exists("/etc/locale.gen"):
- # We found the common way to manage locales.
- ubuntuMode = False
+ def __init_module__(self):
+ self.vars.set("ubuntu_mode", False)
+ if os.path.exists(self.LOCALE_SUPPORTED):
+ self.vars.ubuntu_mode = True
+ else:
+ if not os.path.exists(self.LOCALE_GEN):
+ self.do_raise("{0} and {1} are missing. Is the package \"locales\" installed?".format(
+ self.LOCALE_SUPPORTED, self.LOCALE_GEN
+ ))
+
+ if not self.is_available():
+ self.do_raise("The locale you've entered is not available on your system.")
+
+ self.vars.set("is_present", self.is_present(), output=False)
+ self.vars.set("state_tracking", self._state_name(self.vars.is_present), output=False, change=True)
+
+ def __quit_module__(self):
+ self.vars.state_tracking = self._state_name(self.is_present())
+
+ @staticmethod
+ def _state_name(present):
+ return "present" if present else "absent"
+
+ def is_available(self):
+ """Check if the given locale is available on the system. This is done by
+ checking either :
+ * if the locale is present in /etc/locales.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ __regexp = r'^#?\s*(?P<locale>\S+[\._\S]+) (?P<charset>\S+)\s*$'
+ if self.vars.ubuntu_mode:
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+ with open(__locales_available, 'r') as fd:
+ lines = fd.readlines()
+ res = [re_compiled.match(line) for line in lines]
+ if self.verbosity >= 4:
+ self.vars.available_lines = lines
+ if any(r.group("locale") == self.vars.name for r in res if r):
+ return True
+ # locale may be installed but not listed in the file, for example C.UTF-8 in some systems
+ return self.is_present()
+
+ def is_present(self):
+ runner = locale_runner(self.module)
+ with runner() as ctx:
+ rc, out, err = ctx.run()
+ if self.verbosity >= 4:
+ self.vars.locale_run_info = ctx.run_info
+ return any(self.fix_case(self.vars.name) == self.fix_case(line) for line in out.splitlines())
+
+ def fix_case(self, name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in self.LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+ def set_locale(self, name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
+ search_string = r'#?\s*%s (?P<charset>.+)' % re.escape(name)
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
else:
- module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
- else:
- # Ubuntu created its own system to manage locales.
- ubuntuMode = True
-
- if not is_available(name, ubuntuMode):
- module.fail_json(msg="The locale you've entered is not available "
- "on your system.")
-
- if is_present(name):
- prev_state = "present"
- else:
- prev_state = "absent"
- changed = (prev_state != state)
-
- if module.check_mode:
- module.exit_json(changed=changed)
- else:
- if changed:
- try:
- if ubuntuMode is False:
- apply_change(state, name)
- else:
- apply_change_ubuntu(state, name)
- except EnvironmentError as e:
- module.fail_json(msg=to_native(e), exitValue=e.errno)
-
- module.exit_json(name=name, changed=changed, msg="OK")
+ new_string = r'# %s \g<charset>' % (name)
+ re_search = re.compile(search_string)
+ with open("/etc/locale.gen", "r") as fr:
+ lines = [re_search.sub(new_string, line) for line in fr]
+ with open("/etc/locale.gen", "w") as fw:
+ fw.write("".join(lines))
+
+ def apply_change(self, targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+
+ self.set_locale(name, enabled=(targetState == "present"))
+
+ runner = locale_gen_runner(self.module)
+ with runner() as ctx:
+ ctx.run()
+
+ def apply_change_ubuntu(self, targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ runner = locale_gen_runner(self.module)
+
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ with runner() as ctx:
+ ctx.run()
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+ with open("/var/lib/locales/supported.d/local", "r") as fr:
+ content = fr.readlines()
+ with open("/var/lib/locales/supported.d/local", "w") as fw:
+ for line in content:
+ locale, charset = line.split(' ')
+ if locale != name:
+ fw.write(line)
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ with runner("purge") as ctx:
+ ctx.run()
+
+ @check_mode_skip
+ def __state_fallback__(self):
+ if self.vars.state_tracking == self.vars.state:
+ return
+ if self.vars.ubuntu_mode:
+ self.apply_change_ubuntu(self.vars.state, self.vars.name)
+ else:
+ self.apply_change(self.vars.state, self.vars.name)
+
+
+def main():
+ LocaleGen.execute()
if __name__ == '__main__':
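
Both the removed set_locale() and the new LocaleGen.set_locale() enable or disable a locale by rewriting its line in /etc/locale.gen between commented and uncommented forms. A sketch of that rewrite working on an in-memory string instead of the real file; toggle_locale and the sample content are illustrative:

# Sketch: enable/disable a locale line the way set_locale() rewrites /etc/locale.gen.
# Operates on a string here; the module reads and writes the real file instead.
import re

def toggle_locale(text, name, enabled=True):
    search = re.compile(r'#?\s*%s (?P<charset>.+)' % re.escape(name))
    repl = r'%s \g<charset>' % name if enabled else r'# %s \g<charset>' % name
    return "".join(search.sub(repl, line) for line in text.splitlines(keepends=True))

sample = "# de_CH.UTF-8 UTF-8\nen_US.UTF-8 UTF-8\n"
print(toggle_locale(sample, "de_CH.UTF-8", enabled=True))
# de_CH.UTF-8 UTF-8
# en_US.UTF-8 UTF-8
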
diff --git a/ansible_collections/community/general/plugins/modules/lvg.py b/ansible_collections/community/general/plugins/modules/lvg.py
index 60eaaa42b..8a6384369 100644
--- a/ansible_collections/community/general/plugins/modules/lvg.py
+++ b/ansible_collections/community/general/plugins/modules/lvg.py
@@ -39,10 +39,10 @@ options:
elements: str
pesize:
description:
- - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ - "The size of the physical extent. O(pesize) must be a power of 2 of at least 1 sector
(where the sector size is the largest sector size of the PVs currently used in the VG),
or at least 128KiB."
- - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ - O(pesize) can optionally be suffixed by a UNIT (k/K/m/M/g/G); the default unit is megabyte.
type: str
default: "4"
pv_options:
@@ -52,7 +52,7 @@ options:
default: ''
pvresize:
description:
- - If C(true), resize the physical volume to the maximum available size.
+ - If V(true), resize the physical volume to the maximum available size.
type: bool
default: false
version_added: '0.2.0'
@@ -63,15 +63,33 @@ options:
default: ''
state:
description:
- - Control if the volume group exists.
+ - Control if the volume group exists and its state.
+ - The states V(active) and V(inactive) imply the V(present) state. Added in 7.1.0.
+ - "If V(active) or V(inactive), the module manages the current state of the VG's logical volumes.
+ The module also handles the VG's autoactivation state if supported,
+ except when creating a volume group with the autoactivation option specified in O(vg_options)."
type: str
- choices: [ absent, present ]
+ choices: [ absent, present, active, inactive ]
default: present
force:
description:
- - If C(true), allows to remove volume group with logical volumes.
+ - If V(true), allows removing a volume group that still contains logical volumes.
type: bool
default: false
+ reset_vg_uuid:
+ description:
+ - Whether the volume group's UUID is regenerated.
+ - This is B(not idempotent). Specifying this parameter always results in a change.
+ type: bool
+ default: false
+ version_added: 7.1.0
+ reset_pv_uuid:
+ description:
+ - Whether the volume group's physical volumes' UUIDs are regenerated.
+ - This is B(not idempotent). Specifying this parameter always results in a change.
+ type: bool
+ default: false
+ version_added: 7.1.0
seealso:
- module: community.general.filesystem
- module: community.general.lvol
@@ -112,6 +130,30 @@ EXAMPLES = r'''
vg: resizableVG
pvs: /dev/sda3
pvresize: true
+
+- name: Deactivate a volume group
+ community.general.lvg:
+ state: inactive
+ vg: vg.services
+
+- name: Activate a volume group
+ community.general.lvg:
+ state: active
+ vg: vg.services
+
+- name: Reset a volume group UUID
+ community.general.lvg:
+ state: inactive
+ vg: vg.services
+ reset_vg_uuid: true
+
+- name: Reset both volume group and pv UUID
+ community.general.lvg:
+ state: inactive
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+ reset_vg_uuid: true
+ reset_pv_uuid: true
'''
import itertools
@@ -119,6 +161,8 @@ import os
from ansible.module_utils.basic import AnsibleModule
+VG_AUTOACTIVATION_OPT = '--setautoactivation'
+
def parse_vgs(data):
vgs = []
@@ -156,6 +200,178 @@ def parse_pvs(module, data):
return pvs
+def find_vg(module, vg):
+ if not vg:
+ return None
+ vgs_cmd = module.get_bin_path('vgs', True)
+ dummy, current_vgs, dummy = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd, check_rc=True)
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ return this_vg
+
+
+def is_autoactivation_supported(module, vg_cmd):
+ autoactivation_supported = False
+ dummy, vgchange_opts, dummy = module.run_command([vg_cmd, '--help'], check_rc=True)
+
+ if VG_AUTOACTIVATION_OPT in vgchange_opts:
+ autoactivation_supported = True
+
+ return autoactivation_supported
+
+
+def activate_vg(module, vg, active):
+ changed = False
+ vgchange_cmd = module.get_bin_path('vgchange', True)
+ vgs_cmd = module.get_bin_path('vgs', True)
+ vgs_fields = ['lv_attr']
+
+ autoactivation_enabled = False
+ autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgchange_cmd)
+
+ if autoactivation_supported:
+ vgs_fields.append('autoactivation')
+
+ vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '-o', ','.join(vgs_fields), '--separator', ';', vg]
+ dummy, current_vg_lv_states, dummy = module.run_command(vgs_cmd_with_opts, check_rc=True)
+
+ lv_active_count = 0
+ lv_inactive_count = 0
+
+ for line in current_vg_lv_states.splitlines():
+ parts = line.strip().split(';')
+ if parts[0][4] == 'a':
+ lv_active_count += 1
+ else:
+ lv_inactive_count += 1
+ if autoactivation_supported:
+ autoactivation_enabled = autoactivation_enabled or parts[1] == 'enabled'
+
+ activate_flag = None
+ if active and lv_inactive_count > 0:
+ activate_flag = 'y'
+ elif not active and lv_active_count > 0:
+ activate_flag = 'n'
+
+ # Extra logic necessary because vgchange returns error when autoactivation is already set
+ if autoactivation_supported:
+ if active and not autoactivation_enabled:
+ if module.check_mode:
+ changed = True
+ else:
+ module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'y', vg], check_rc=True)
+ changed = True
+ elif not active and autoactivation_enabled:
+ if module.check_mode:
+ changed = True
+ else:
+ module.run_command([vgchange_cmd, VG_AUTOACTIVATION_OPT, 'n', vg], check_rc=True)
+ changed = True
+
+ if activate_flag is not None:
+ if module.check_mode:
+ changed = True
+ else:
+ module.run_command([vgchange_cmd, '--activate', activate_flag, vg], check_rc=True)
+ changed = True
+
+ return changed
+
+
+def append_vgcreate_options(module, state, vgoptions):
+ vgcreate_cmd = module.get_bin_path('vgcreate', True)
+
+ autoactivation_supported = is_autoactivation_supported(module=module, vg_cmd=vgcreate_cmd)
+
+ if autoactivation_supported and state in ['active', 'inactive']:
+ if VG_AUTOACTIVATION_OPT not in vgoptions:
+ if state == 'active':
+ vgoptions += [VG_AUTOACTIVATION_OPT, 'y']
+ else:
+ vgoptions += [VG_AUTOACTIVATION_OPT, 'n']
+
+
+def get_pv_values_for_resize(module, device):
+ pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
+ pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix", "--separator", ";", "-o", "dev_size,pv_size,pe_start,vg_extent_size"]
+ pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
+
+ dummy, pv_values, dummy = module.run_command(pvdisplay_cmd_device_options, check_rc=True)
+
+ values = pv_values.strip().split(';')
+
+ dev_size = int(values[0])
+ pv_size = int(values[1])
+ pe_start = int(values[2])
+ vg_extent_size = int(values[3])
+
+ return (dev_size, pv_size, pe_start, vg_extent_size)
+
+
+def resize_pv(module, device):
+ changed = False
+ pvresize_cmd = module.get_bin_path('pvresize', True)
+
+ dev_size, pv_size, pe_start, vg_extent_size = get_pv_values_for_resize(module=module, device=device)
+ if (dev_size - (pe_start + pv_size)) > vg_extent_size:
+ if module.check_mode:
+ changed = True
+ else:
+ # If there is a missing PV on the machine, some versions of pvresize indicate failure via their return code.
+ rc, out, err = module.run_command([pvresize_cmd, device])
+ dummy, new_pv_size, dummy, dummy = get_pv_values_for_resize(module=module, device=device)
+ if pv_size == new_pv_size:
+ module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err, out=out)
+ else:
+ changed = True
+
+ return changed
+
+
+def reset_uuid_pv(module, device):
+ changed = False
+ pvs_cmd = module.get_bin_path('pvs', True)
+ pvs_cmd_with_opts = [pvs_cmd, '--noheadings', '-o', 'uuid', device]
+ pvchange_cmd = module.get_bin_path('pvchange', True)
+ pvchange_cmd_with_opts = [pvchange_cmd, '-u', device]
+
+ dummy, orig_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True)
+
+ if module.check_mode:
+ changed = True
+ else:
+ # If there is a missing pv on the machine, pvchange rc indicates failure.
+ pvchange_rc, pvchange_out, pvchange_err = module.run_command(pvchange_cmd_with_opts)
+ dummy, new_uuid, dummy = module.run_command(pvs_cmd_with_opts, check_rc=True)
+ if orig_uuid.strip() == new_uuid.strip():
+ module.fail_json(msg="PV (%s) UUID change failed" % (device), rc=pvchange_rc, err=pvchange_err, out=pvchange_out)
+ else:
+ changed = True
+
+ return changed
+
+
+def reset_uuid_vg(module, vg):
+ changed = False
+ vgchange_cmd = module.get_bin_path('vgchange', True)
+ vgchange_cmd_with_opts = [vgchange_cmd, '-u', vg]
+ if module.check_mode:
+ changed = True
+ else:
+ module.run_command(vgchange_cmd_with_opts, check_rc=True)
+ changed = True
+
+ return changed
+
+
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -165,9 +381,14 @@ def main():
pv_options=dict(type='str', default=''),
pvresize=dict(type='bool', default=False),
vg_options=dict(type='str', default=''),
- state=dict(type='str', default='present', choices=['absent', 'present']),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'active', 'inactive']),
force=dict(type='bool', default=False),
+ reset_vg_uuid=dict(type='bool', default=False),
+ reset_pv_uuid=dict(type='bool', default=False),
),
+ required_if=[
+ ['reset_pv_uuid', True, ['pvs']],
+ ],
supports_check_mode=True,
)
@@ -178,18 +399,25 @@ def main():
pesize = module.params['pesize']
pvoptions = module.params['pv_options'].split()
vgoptions = module.params['vg_options'].split()
+ reset_vg_uuid = module.boolean(module.params['reset_vg_uuid'])
+ reset_pv_uuid = module.boolean(module.params['reset_pv_uuid'])
+
+ this_vg = find_vg(module=module, vg=vg)
+ present_state = state in ['present', 'active', 'inactive']
+ pvs_required = present_state and this_vg is None
+ changed = False
dev_list = []
if module.params['pvs']:
dev_list = list(module.params['pvs'])
- elif state == 'present':
+ elif pvs_required:
module.fail_json(msg="No physical volumes given.")
# LVM always uses real paths not symlinks so replace symlinks with actual path
for idx, dev in enumerate(dev_list):
dev_list[idx] = os.path.realpath(dev)
- if state == 'present':
+ if present_state:
# check given devices
for test_dev in dev_list:
if not os.path.exists(test_dev):
@@ -216,25 +444,9 @@ def main():
if used_pvs:
module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
- vgs_cmd = module.get_bin_path('vgs', True)
- rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
-
- if rc != 0:
- module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
-
- changed = False
-
- vgs = parse_vgs(current_vgs)
-
- for test_vg in vgs:
- if test_vg['name'] == vg:
- this_vg = test_vg
- break
- else:
- this_vg = None
-
if this_vg is None:
- if state == 'present':
+ if present_state:
+ append_vgcreate_options(module=module, state=state, vgoptions=vgoptions)
# create VG
if module.check_mode:
changed = True
@@ -268,68 +480,61 @@ def main():
module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
else:
module.fail_json(msg="Refuse to remove non-empty volume group %s without force=true" % (vg))
+ # activate/deactivate existing VG
+ elif state == 'active':
+ changed = activate_vg(module=module, vg=vg, active=True)
+ elif state == 'inactive':
+ changed = activate_vg(module=module, vg=vg, active=False)
+
+ # reset VG uuid
+ if reset_vg_uuid:
+ changed = reset_uuid_vg(module=module, vg=vg) or changed
# resize VG
- current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
- devs_to_remove = list(set(current_devs) - set(dev_list))
- devs_to_add = list(set(dev_list) - set(current_devs))
-
- if current_devs:
- if state == 'present' and pvresize:
- for device in current_devs:
- pvresize_cmd = module.get_bin_path('pvresize', True)
- pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
- pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"]
- pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
- rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"])
- dev_size = int(dev_size.replace(" ", ""))
- rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"])
- pv_size = int(pv_size.replace(" ", ""))
- rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"])
- pe_start = int(pe_start.replace(" ", ""))
- rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"])
- vg_extent_size = int(vg_extent_size.replace(" ", ""))
- if (dev_size - (pe_start + pv_size)) > vg_extent_size:
- if module.check_mode:
+ if dev_list:
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
+
+ if current_devs:
+ if present_state:
+ for device in current_devs:
+ if pvresize:
+ changed = resize_pv(module=module, device=device) or changed
+ if reset_pv_uuid:
+ changed = reset_uuid_pv(module=module, device=device) or changed
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
changed = True
else:
- rc, dummy, err = module.run_command([pvresize_cmd, device])
- if rc != 0:
- module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err)
- else:
- changed = True
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
- if devs_to_add or devs_to_remove:
- if module.check_mode:
- changed = True
- else:
- if devs_to_add:
- devs_to_add_string = ' '.join(devs_to_add)
- # create PV
- pvcreate_cmd = module.get_bin_path('pvcreate', True)
- for current_dev in devs_to_add:
- rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
if rc == 0:
changed = True
else:
- module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
- # add PV to our VG
- vgextend_cmd = module.get_bin_path('vgextend', True)
- rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
-
- # remove some PV from our VG
- if devs_to_remove:
- devs_to_remove_string = ' '.join(devs_to_remove)
- vgreduce_cmd = module.get_bin_path('vgreduce', True)
- rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
module.exit_json(changed=changed)
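
The new get_pv_values_for_resize()/resize_pv() pair decides whether running pvresize is worthwhile by comparing the device size against the PV size plus its data offset, using the VG extent size as slack. A sketch of that decision applied to a pvdisplay-style line; the sample numbers are made up:

# Sketch: decide whether a PV needs resizing, mirroring resize_pv()'s test.
# Input mimics `pvdisplay --units b --columns --noheadings --nosuffix --separator ';'
#               -o dev_size,pv_size,pe_start,vg_extent_size` (values are illustrative).
def needs_pvresize(pvdisplay_line):
    dev_size, pv_size, pe_start, vg_extent_size = (int(v) for v in pvdisplay_line.strip().split(';'))
    return (dev_size - (pe_start + pv_size)) > vg_extent_size

print(needs_pvresize("10737418240;8589934592;1048576;4194304"))   # True: ~2 GiB unclaimed
print(needs_pvresize("10737418240;10736369664;1048576;4194304"))  # False: already fully used
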
diff --git a/ansible_collections/community/general/plugins/modules/lvg_rename.py b/ansible_collections/community/general/plugins/modules/lvg_rename.py
new file mode 100644
index 000000000..bd48ffa62
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lvg_rename.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Contributors to the Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Laszlo Szomor (@lszomor)
+module: lvg_rename
+short_description: Renames LVM volume groups
+description:
+ - This module renames volume groups using the C(vgrename) command.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+version_added: 7.1.0
+options:
+ vg:
+ description:
+ - The name or UUID of the source VG.
+ - See V(vgrename(8\)) for valid values.
+ type: str
+ required: true
+ vg_new:
+ description:
+ - The new name of the VG.
+ - See V(lvm(8\)) for valid names.
+ type: str
+ required: true
+seealso:
+- module: community.general.lvg
+notes:
+ - This module does not modify VG renaming-related configurations like C(fstab) entries or boot parameters.
+'''
+
+EXAMPLES = r'''
+- name: Rename a VG by name
+ community.general.lvg_rename:
+ vg: vg_orig_name
+ vg_new: vg_new_name
+
+- name: Rename a VG by UUID
+ community.general.lvg_rename:
+ vg: SNgd0Q-rPYa-dPB8-U1g6-4WZI-qHID-N7y9Vj
+ vg_new: vg_new_name
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+argument_spec = dict(
+ vg=dict(type='str', required=True,),
+ vg_new=dict(type='str', required=True,),
+)
+
+
+class LvgRename(object):
+ def __init__(self, module):
+ '''
+ Orchestrates the lvg_rename module logic.
+
+ :param module: An AnsibleModule instance.
+ '''
+ self.module = module
+ self.result = {'changed': False}
+ self.vg_list = []
+ self._load_params()
+
+ def run(self):
+ """Performs the module logic."""
+
+ self._load_vg_list()
+
+ old_vg_exists = self._is_vg_exists(vg=self.vg)
+ new_vg_exists = self._is_vg_exists(vg=self.vg_new)
+
+ if old_vg_exists:
+ if new_vg_exists:
+ self.module.fail_json(msg='The new VG name (%s) is already in use.' % (self.vg_new))
+ else:
+ self._rename_vg()
+ else:
+ if new_vg_exists:
+ self.result['msg'] = 'The new VG (%s) already exists, nothing to do.' % (self.vg_new)
+ self.module.exit_json(**self.result)
+ else:
+ self.module.fail_json(msg='Both current (%s) and new (%s) VG are missing.' % (self.vg, self.vg_new))
+
+ self.module.exit_json(**self.result)
+
+ def _load_params(self):
+ """Load the parameters from the module."""
+
+ self.vg = self.module.params['vg']
+ self.vg_new = self.module.params['vg_new']
+
+ def _load_vg_list(self):
+ """Load the VGs from the system."""
+
+ vgs_cmd = self.module.get_bin_path('vgs', required=True)
+ vgs_cmd_with_opts = [vgs_cmd, '--noheadings', '--separator', ';', '-o', 'vg_name,vg_uuid']
+ dummy, vg_raw_list, dummy = self.module.run_command(vgs_cmd_with_opts, check_rc=True)
+
+ for vg_info in vg_raw_list.splitlines():
+ vg_name, vg_uuid = vg_info.strip().split(';')
+ self.vg_list.append(vg_name)
+ self.vg_list.append(vg_uuid)
+
+ def _is_vg_exists(self, vg):
+ '''
+ Checks VG existence by name or UUID. It removes the '/dev/' prefix before checking.
+
+ :param vg: A string with the name or UUID of the VG.
+ :returns: A boolean indicates whether the VG exists or not.
+ '''
+
+ vg_found = False
+ dev_prefix = '/dev/'
+
+ if vg.startswith(dev_prefix):
+ vg_id = vg[len(dev_prefix):]
+ else:
+ vg_id = vg
+
+ vg_found = vg_id in self.vg_list
+
+ return vg_found
+
+ def _rename_vg(self):
+ """Renames the volume group."""
+
+ vgrename_cmd = self.module.get_bin_path('vgrename', required=True)
+
+ if self.module._diff:
+ self.result['diff'] = {'before': {'vg': self.vg}, 'after': {'vg': self.vg_new}}
+
+ if self.module.check_mode:
+ self.result['msg'] = "Running in check mode. The module would rename VG %s to %s." % (self.vg, self.vg_new)
+ self.result['changed'] = True
+ else:
+ vgrename_cmd_with_opts = [vgrename_cmd, self.vg, self.vg_new]
+ dummy, vg_rename_out, dummy = self.module.run_command(vgrename_cmd_with_opts, check_rc=True)
+
+ self.result['msg'] = vg_rename_out
+ self.result['changed'] = True
+
+
+def setup_module_object():
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+ return module
+
+
+def main():
+ module = setup_module_object()
+ lvg_rename = LvgRename(module=module)
+ lvg_rename.run()
+
+
+if __name__ == '__main__':
+ main()
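
_load_vg_list() above flattens both the VG name and its UUID into a single lookup list so that the vg parameter can match either form. The same parsing as a small sketch on captured vgs output; load_vg_list and the sample line are illustrative:

# Sketch: parse `vgs --noheadings --separator ';' -o vg_name,vg_uuid` output into a
# flat lookup list containing both names and UUIDs, as _load_vg_list() does.
def load_vg_list(vgs_output):
    vg_list = []
    for line in vgs_output.splitlines():
        if not line.strip():
            continue
        vg_name, vg_uuid = line.strip().split(';')
        vg_list.extend([vg_name, vg_uuid])
    return vg_list

sample = "  vg_orig_name;SNgd0Q-rPYa-dPB8-U1g6-4WZI-qHID-N7y9Vj\n"
vgs = load_vg_list(sample)
print('vg_orig_name' in vgs)                               # lookup by name
print('SNgd0Q-rPYa-dPB8-U1g6-4WZI-qHID-N7y9Vj' in vgs)     # lookup by UUID
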
diff --git a/ansible_collections/community/general/plugins/modules/lvol.py b/ansible_collections/community/general/plugins/modules/lvol.py
index d193a4e83..a2a870260 100644
--- a/ansible_collections/community/general/plugins/modules/lvol.py
+++ b/ansible_collections/community/general/plugins/modules/lvol.py
@@ -41,18 +41,18 @@ options:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
- according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
+ according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE|ORIGIN];
Float values must begin with a digit.
- When resizing, apart from specifying an absolute size you may, according to
lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with
- the prefix C(+) or the amount to reduce the logical volume by with prefix C(-).
- - Resizing using C(+) or C(-) was not supported prior to community.general 3.0.0.
- - Please note that when using C(+) or C(-), the module is B(not idempotent).
+ the prefix V(+) or the amount to reduce the logical volume by with prefix V(-).
+ - Resizing using V(+) or V(-) was not supported prior to community.general 3.0.0.
+ - Please note that when using V(+), V(-), or percentage of FREE, the module is B(not idempotent).
state:
type: str
description:
- - Control if the logical volume exists. If C(present) and the
- volume does not already exist then the C(size) option is required.
+ - Control if the logical volume exists. If V(present) and the
+ volume does not already exist then the O(size) option is required.
choices: [ absent, present ]
default: present
active:
@@ -73,11 +73,12 @@ options:
snapshot:
type: str
description:
- - The name of the snapshot volume
+ - The name of a snapshot volume to be configured. When creating a snapshot volume, the O(lv) parameter specifies the origin volume.
pvs:
- type: str
+ type: list
+ elements: str
description:
- - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
+ - List of physical volumes (for example V(/dev/sda, /dev/sdb)).
thinpool:
type: str
description:
@@ -110,7 +111,9 @@ EXAMPLES = '''
vg: firefly
lv: test
size: 512
- pvs: /dev/sda,/dev/sdb
+ pvs:
+ - /dev/sda
+ - /dev/sdb
- name: Create cache pool logical volume
community.general.lvol:
@@ -299,7 +302,7 @@ def main():
shrink=dict(type='bool', default=True),
active=dict(type='bool', default=True),
snapshot=dict(type='str'),
- pvs=dict(type='str'),
+ pvs=dict(type='list', elements='str'),
resizefs=dict(type='bool', default=False),
thinpool=dict(type='str'),
),
@@ -340,7 +343,7 @@ def main():
if pvs is None:
pvs = ""
else:
- pvs = pvs.replace(",", " ")
+ pvs = " ".join(pvs)
if opts is None:
opts = ""
@@ -368,10 +371,10 @@ def main():
if size_percent > 100:
module.fail_json(msg="Size percentage cannot be larger than 100%")
size_whole = size_parts[1]
- if size_whole == 'ORIGIN':
- module.fail_json(msg="Snapshot Volumes are not supported")
- elif size_whole not in ['VG', 'PVS', 'FREE']:
- module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
+ if size_whole == 'ORIGIN' and snapshot is None:
+ module.fail_json(msg="Percentage of ORIGIN supported only for snapshot volumes")
+ elif size_whole not in ['VG', 'PVS', 'FREE', 'ORIGIN']:
+ module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE|ORIGIN")
size_opt = 'l'
size_unit = ''
@@ -552,9 +555,9 @@ def main():
elif rc == 0:
changed = True
msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
- elif "matches existing size" in err:
+ elif "matches existing size" in err or "matches existing size" in out:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
- elif "not larger than existing size" in err:
+ elif "not larger than existing size" in err or "not larger than existing size" in out:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
@@ -585,9 +588,9 @@ def main():
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
- elif "matches existing size" in err:
+ elif "matches existing size" in err or "matches existing size" in out:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
- elif "not larger than existing size" in err:
+ elif "not larger than existing size" in err or "not larger than existing size" in out:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
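
The size-handling hunk above now accepts ORIGIN as an extents base, but only for snapshot volumes. A sketch of just that validation for an lvcreate-style "NN%BASE" size string; validate_percent_size is an illustrative helper, not the module's full parser:

# Sketch: validate an lvcreate-style extents percentage such as "100%FREE" or "50%ORIGIN",
# mirroring the module's new rule that %ORIGIN is only valid for snapshot volumes.
def validate_percent_size(size, snapshot=None):
    percent, base = size.split('%', 1)
    if float(percent) > 100:
        raise ValueError("Size percentage cannot be larger than 100%")
    if base == 'ORIGIN' and snapshot is None:
        raise ValueError("Percentage of ORIGIN supported only for snapshot volumes")
    if base not in ('VG', 'PVS', 'FREE', 'ORIGIN'):
        raise ValueError("Specify extents as a percentage of VG|PVS|FREE|ORIGIN")
    return float(percent), base

print(validate_percent_size("100%FREE"))                      # (100.0, 'FREE')
print(validate_percent_size("50%ORIGIN", snapshot="snap01"))  # (50.0, 'ORIGIN')
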
diff --git a/ansible_collections/community/general/plugins/modules/lxc_container.py b/ansible_collections/community/general/plugins/modules/lxc_container.py
index aec8f12dc..7ded041e9 100644
--- a/ansible_collections/community/general/plugins/modules/lxc_container.py
+++ b/ansible_collections/community/general/plugins/modules/lxc_container.py
@@ -92,7 +92,7 @@ options:
type: str
lxc_path:
description:
- - Place container under C(PATH).
+ - Place container under E(PATH).
type: path
container_log:
description:
@@ -111,7 +111,7 @@ options:
- debug
- DEBUG
description:
- - Set the log level for a container where I(container_log) was set.
+ - Set the log level for a container where O(container_log) was set.
type: str
required: false
default: INFO
@@ -158,7 +158,7 @@ options:
- clone
description:
- Define the state of a container.
- - If you clone a container using I(clone_name) the newly cloned
+ - If you clone a container using O(clone_name) the newly cloned
container is created in a stopped state.
- The running container will be stopped while the clone operation is
happening and upon completion of the clone the original container
@@ -178,17 +178,17 @@ notes:
- Containers must have a unique name. If you attempt to create a container
with a name that already exists in the users namespace the module will
simply return as "unchanged".
- - The I(container_command) can be used with any state except C(absent). If
- used with state C(stopped) the container will be C(started), the command
- executed, and then the container C(stopped) again. Likewise if I(state=stopped)
+ - The O(container_command) can be used with any state except V(absent). If
+ used with state V(stopped) the container will be V(started), the command
+ executed, and then the container V(stopped) again. Likewise if O(state=stopped)
and the container does not exist it will be first created,
- C(started), the command executed, and then C(stopped). If you use a "|"
+ V(started), the command executed, and then V(stopped). If you use a "|"
in the variable you can use common script formatting within the variable
- itself. The I(container_command) option will always execute as BASH.
- When using I(container_command), a log file is created in the C(/tmp/) directory
+ itself. The O(container_command) option will always execute as BASH.
+ When using O(container_command), a log file is created in the C(/tmp/) directory
which contains both C(stdout) and C(stderr) of any command executed.
- - If I(archive=true) the system will attempt to create a compressed
- tarball of the running container. The I(archive) option supports LVM backed
+ - If O(archive=true) the system will attempt to create a compressed
+ tarball of the running container. The O(archive) option supports LVM backed
containers and will create a snapshot of the running container when
creating the archive.
- If your distro does not have a package for C(python3-lxc), which is a
@@ -1277,7 +1277,7 @@ class LxcContainerManagement(object):
"""
vg = self._get_lxc_vg()
- free_space, messurement = self._get_vg_free_pe(vg_name=vg)
+ free_space, measurement = self._get_vg_free_pe(vg_name=vg)
if free_space < float(snapshot_size_gb):
message = (
diff --git a/ansible_collections/community/general/plugins/modules/lxd_container.py b/ansible_collections/community/general/plugins/modules/lxd_container.py
index f10fc4872..9fd1b183b 100644
--- a/ansible_collections/community/general/plugins/modules/lxd_container.py
+++ b/ansible_collections/community/general/plugins/modules/lxd_container.py
@@ -34,32 +34,33 @@ options:
project:
description:
- 'Project of an instance.
- See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).'
+ See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).'
required: false
type: str
version_added: 4.8.0
architecture:
description:
- - 'The architecture for the instance (for example C(x86_64) or C(i686)).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
+ - 'The architecture for the instance (for example V(x86_64) or V(i686)).
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
type: str
required: false
config:
description:
- - 'The config for the instance (for example C({"limits.cpu": "2"})).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
+ - 'The config for the instance (for example V({"limits.cpu": "2"})).
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
- If the instance already exists and its "config" values in metadata
- obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
- are different, this module tries to apply the configurations.
- - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
+ obtained from the LXD API U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get)
+ are different, then this module tries to apply the configurations
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_put).
+ - The keys starting with C(volatile.) are ignored for this comparison when O(ignore_volatile_options=true).
type: dict
required: false
ignore_volatile_options:
description:
- - If set to C(true), options starting with C(volatile.) are ignored. As a result,
+ - If set to V(true), options starting with C(volatile.) are ignored. As a result,
they are reapplied for each execution.
- - This default behavior can be changed by setting this option to C(false).
- - The default value changed from C(true) to C(false) in community.general 6.0.0.
+ - This default behavior can be changed by setting this option to V(false).
+ - The default value changed from V(true) to V(false) in community.general 6.0.0.
type: bool
required: false
default: false
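
The option described above controls whether keys starting with C(volatile.) take part in the config comparison. A minimal sketch of that comparison, assuming plain dict configs (the helper name is illustrative, not the module's):

    # Sketch: decide whether the desired config differs from the instance's
    # current config, optionally ignoring the LXD-managed volatile.* keys.
    def needs_config_update(desired, actual, ignore_volatile_options=True):
        for key, value in desired.items():
            if ignore_volatile_options and key.startswith('volatile.'):
                continue
            if actual.get(key) != value:
                return True
        return False

    # needs_config_update({'limits.cpu': '2'},
    #                     {'limits.cpu': '2', 'volatile.base_image': 'abc'})  -> False
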
@@ -72,26 +73,23 @@ options:
devices:
description:
- 'The devices for the instance
- (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
+ (for example V({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).'
type: dict
required: false
ephemeral:
description:
- - Whether or not the instance is ephemeral (for example C(true) or C(false)).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
+ - Whether or not the instance is ephemeral (for example V(true) or V(false)).
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/instances/instance_get).
required: false
type: bool
source:
description:
- 'The source for the instance
- (e.g. { "type": "image",
- "mode": "pull",
- "server": "https://images.linuxcontainers.org",
- "protocol": "lxd",
- "alias": "ubuntu/xenial/amd64" }).'
- - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
- - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
+ (for example V({ "type": "image", "mode": "pull", "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd", "alias": "ubuntu/xenial/amd64" })).'
+ - 'See U(https://documentation.ubuntu.com/lxd/en/latest/api/) for complete API documentation.'
+ - 'Note that C(protocol) accepts two choices: V(lxd) or V(simplestreams).'
required: false
type: dict
state:
@@ -125,7 +123,7 @@ options:
type: int
type:
description:
- - Instance type can be either C(virtual-machine) or C(container).
+ - Instance type can be either V(virtual-machine) or V(container).
required: false
default: container
choices:
@@ -135,7 +133,7 @@ options:
version_added: 4.1.0
wait_for_ipv4_addresses:
description:
- - If this is true, the C(lxd_container) waits until IPv4 addresses
+ - If this is V(true), the C(lxd_container) waits until IPv4 addresses
are set on all network interfaces in the instance after
starting or restarting.
required: false
@@ -143,14 +141,14 @@ options:
type: bool
wait_for_container:
description:
- - If set to C(true), the tasks will wait till the task reports a
+ - If set to V(true), the tasks will wait until the task reports a
success status when performing container operations.
default: false
type: bool
version_added: 4.4.0
force_stop:
description:
- - If this is true, the C(lxd_container) forces to stop the instance
+ - If this is V(true), the C(lxd_container) forces the instance to stop
when it stops or restarts the instance.
required: false
default: false
@@ -201,7 +199,8 @@ notes:
2.1, the latter requires python to be installed in the instance which can
be done with the command module.
- You can copy a file from the host to the instance
- with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the C(community.general.lxd) connection plugin.
+ with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module
+ and the P(community.general.lxd#connection) connection plugin.
See the example below.
- You can copy a file in the created instance to the localhost
with C(command=lxc file pull instance_name/dir/filename filename).
@@ -437,12 +436,12 @@ ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
# CONFIG_PARAMS is a list of config attribute names.
CONFIG_PARAMS = [
- 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+ 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source', 'type'
]
# CONFIG_CREATION_PARAMS is a list of attribute names that are only applied
# on instance creation.
-CONFIG_CREATION_PARAMS = ['source']
+CONFIG_CREATION_PARAMS = ['source', 'type']
class LXDContainerManagement(object):
@@ -468,13 +467,6 @@ class LXDContainerManagement(object):
self.type = self.module.params['type']
- # LXD Rest API provides additional endpoints for creating containers and virtual-machines.
- self.api_endpoint = None
- if self.type == 'container':
- self.api_endpoint = '/1.0/containers'
- elif self.type == 'virtual-machine':
- self.api_endpoint = '/1.0/virtual-machines'
-
self.key_file = self.module.params.get('client_key')
if self.key_file is None:
self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
@@ -500,6 +492,18 @@ class LXDContainerManagement(object):
)
except LXDClientException as e:
self.module.fail_json(msg=e.msg)
+
+ # The LXD (3.19) REST API provides an instances endpoint; fall back to containers and virtual-machines
+ # https://documentation.ubuntu.com/lxd/en/latest/rest-api/#instances-containers-and-virtual-machines
+ self.api_endpoint = '/1.0/instances'
+ check_api_endpoint = self.client.do('GET', '{0}?project='.format(self.api_endpoint), ok_error_codes=[404])
+
+ if check_api_endpoint['error_code'] == 404:
+ if self.type == 'container':
+ self.api_endpoint = '/1.0/containers'
+ elif self.type == 'virtual-machine':
+ self.api_endpoint = '/1.0/virtual-machines'
+
self.trust_password = self.module.params.get('trust_password', None)
self.actions = []
self.diff = {'before': {}, 'after': {}}
@@ -552,6 +556,8 @@ class LXDContainerManagement(object):
url = '{0}?{1}'.format(url, urlencode(url_params))
config = self.config.copy()
config['name'] = self.name
+ if self.type not in self.api_endpoint:
+ config['type'] = self.type
if not self.module.check_mode:
self.client.do('POST', url, config, wait_for_container=self.wait_for_container)
self.actions.append('create')
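
The hunks above replace the hard-coded per-type endpoints with a probe of the unified C(/1.0/instances) endpoint, falling back to C(/1.0/containers) or C(/1.0/virtual-machines) when the server answers 404, and only sending C(type) in the creation body when the unified endpoint is in use. A hedged sketch of that logic, assuming a client whose do() returns a dict with an C(error_code) key as the diff suggests; names here are illustrative:

    # Sketch: pick the REST endpoint once, preferring the unified LXD 3.19+
    # /1.0/instances endpoint and falling back to the legacy per-type ones.
    def pick_api_endpoint(client, instance_type):
        endpoint = '/1.0/instances'
        probe = client.do('GET', endpoint + '?project=', ok_error_codes=[404])
        if probe.get('error_code') == 404:
            endpoint = {'container': '/1.0/containers',
                        'virtual-machine': '/1.0/virtual-machines'}[instance_type]
        return endpoint

    def build_create_body(endpoint, name, config, instance_type):
        body = dict(config, name=name)
        # Only the unified endpoint needs an explicit 'type' field.
        if instance_type not in endpoint:
            body['type'] = instance_type
        return body
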
diff --git a/ansible_collections/community/general/plugins/modules/lxd_profile.py b/ansible_collections/community/general/plugins/modules/lxd_profile.py
index 45f499b78..13660fd91 100644
--- a/ansible_collections/community/general/plugins/modules/lxd_profile.py
+++ b/ansible_collections/community/general/plugins/modules/lxd_profile.py
@@ -32,7 +32,7 @@ options:
project:
description:
- 'Project of a profile.
- See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).'
+ See U(https://documentation.ubuntu.com/lxd/en/latest/projects/).'
type: str
required: false
version_added: 4.8.0
@@ -43,12 +43,13 @@ options:
config:
description:
- 'The config for the instance (e.g. {"limits.memory": "4GB"}).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).'
- If the profile already exists and its "config" value in metadata
obtained from
GET /1.0/profiles/<name>
- U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
- are different, they this module tries to apply the configurations.
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get)
+ are different, then this module tries to apply the configurations
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_put).
- Not all config values are supported to apply the existing profile.
Maybe you need to delete and recreate a profile.
required: false
@@ -57,14 +58,14 @@ options:
description:
- 'The devices for the profile
(e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_get).'
required: false
type: dict
new_name:
description:
- A new name of a profile.
- If this parameter is specified a profile will be renamed to this name.
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post).
required: false
type: str
merge_profile:
@@ -419,7 +420,7 @@ class LXDProfileManagement(object):
Rebuild the Profile by the configuration provided in the play.
Existing configurations are discarded.
- This ist the default behavior.
+ This is the default behavior.
Args:
dict(config): Dict with the old config in 'metadata' and new config in 'config'
diff --git a/ansible_collections/community/general/plugins/modules/lxd_project.py b/ansible_collections/community/general/plugins/modules/lxd_project.py
index 983531fa0..0d321808a 100644
--- a/ansible_collections/community/general/plugins/modules/lxd_project.py
+++ b/ansible_collections/community/general/plugins/modules/lxd_project.py
@@ -34,19 +34,20 @@ options:
type: str
config:
description:
- - 'The config for the project (for example C({"features.profiles": "true"})).
- See U(https://linuxcontainers.org/lxd/docs/master/projects/).'
+ - 'The config for the project (for example V({"features.profiles": "true"})).
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get).'
- If the project already exists and its "config" value in metadata
obtained from
C(GET /1.0/projects/<name>)
- U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_get)
- are different, then this module tries to apply the configurations.
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_get)
+ are different, then this module tries to apply the configurations
+ U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_put).
type: dict
new_name:
description:
- A new name of a project.
- If this parameter is specified a project will be renamed to this name.
- See U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_post).
+ See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post).
required: false
type: str
merge_project:
@@ -98,7 +99,7 @@ options:
running this module using the following command:
C(lxc config set core.trust_password <some random password>)
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
- - If I(trust_password) is set, this module send a request for
+ - If O(trust_password) is set, this module sends a request for
authentication before sending any requests.
required: false
type: str
@@ -146,7 +147,7 @@ logs:
elements: dict
contains:
type:
- description: Type of actions performed, currently only C(sent request).
+ description: Type of actions performed, currently only V(sent request).
type: str
sample: "sent request"
request:
@@ -166,7 +167,7 @@ logs:
type: str
sample: "(too long to be placed here)"
timeout:
- description: Timeout of HTTP request, C(null) if unset.
+ description: Timeout of HTTP request, V(null) if unset.
type: int
sample: null
response:
diff --git a/ansible_collections/community/general/plugins/modules/macports.py b/ansible_collections/community/general/plugins/modules/macports.py
index 6f40d0938..e81fb9142 100644
--- a/ansible_collections/community/general/plugins/modules/macports.py
+++ b/ansible_collections/community/general/plugins/modules/macports.py
@@ -55,7 +55,7 @@ options:
variant:
description:
- A port variant specification.
- - 'C(variant) is only supported with state: I(installed)/I(present).'
+ - 'O(variant) is only supported with O(state=installed) and O(state=present).'
aliases: ['variants']
type: str
'''
diff --git a/ansible_collections/community/general/plugins/modules/mail.py b/ansible_collections/community/general/plugins/modules/mail.py
index feaac6923..1916c140c 100644
--- a/ansible_collections/community/general/plugins/modules/mail.py
+++ b/ansible_collections/community/general/plugins/modules/mail.py
@@ -114,18 +114,18 @@ options:
default: utf-8
subtype:
description:
- - The minor mime type, can be either C(plain) or C(html).
- - The major type is always C(text).
+ - The minor mime type, can be either V(plain) or V(html).
+ - The major type is always V(text).
type: str
choices: [ html, plain ]
default: plain
secure:
description:
- - If C(always), the connection will only send email if the connection is Encrypted.
+ - If V(always), the connection will only send email if the connection is Encrypted.
If the server doesn't accept the encrypted connection it will fail.
- - If C(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send.
- - If C(never), the connection will not attempt to setup a secure SSL/TLS session, before sending
- - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending.
+ - If V(try), the connection will attempt to set up a secure SSL/TLS session, before trying to send.
+ - If V(never), the connection will not attempt to set up a secure SSL/TLS session, before sending.
+ - If V(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending.
If it is unable to do so it will fail.
type: str
choices: [ always, never, starttls, try ]
@@ -140,6 +140,13 @@ options:
- Allows for manual specification of host for EHLO.
type: str
version_added: 3.8.0
+ message_id_domain:
+ description:
+ - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID).
+ - Note that this is only available on Python 3+. On Python 2, this value will be ignored.
+ type: str
+ default: ansible
+ version_added: 8.2.0
'''
EXAMPLES = r'''
@@ -205,10 +212,11 @@ EXAMPLES = r'''
body: System {{ ansible_hostname }} has been successfully provisioned.
secure: starttls
-- name: Sending an e-mail using StartTLS, remote server, custom EHLO
+- name: Sending an e-mail using StartTLS, remote server, custom EHLO, and timeout of 10 seconds
community.general.mail:
host: some.smtp.host.tld
port: 25
+ timeout: 10
ehlohost: my-resolvable-hostname.tld
to: John Smith <john.smith@example.com>
subject: Ansible-report
@@ -221,7 +229,7 @@ import smtplib
import ssl
import traceback
from email import encoders
-from email.utils import parseaddr, formataddr, formatdate
+from email.utils import parseaddr, formataddr, formatdate, make_msgid
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
@@ -253,6 +261,7 @@ def main():
subtype=dict(type='str', default='plain', choices=['html', 'plain']),
secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
timeout=dict(type='int', default=20),
+ message_id_domain=dict(type='str', default='ansible'),
),
required_together=[['password', 'username']],
)
@@ -274,6 +283,7 @@ def main():
subtype = module.params.get('subtype')
secure = module.params.get('secure')
timeout = module.params.get('timeout')
+ message_id_domain = module.params['message_id_domain']
code = 0
secure_state = False
@@ -348,13 +358,19 @@ def main():
msg['From'] = formataddr((sender_phrase, sender_addr))
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = Header(subject, charset)
+ try:
+ msg['Message-ID'] = make_msgid(domain=message_id_domain)
+ except TypeError:
+ # `domain` is only available in Python 3
+ msg['Message-ID'] = make_msgid()
+ module.warn("The Message-ID domain cannot be set on Python 2; the system's hostname is used")
msg.preamble = "Multipart message"
for header in headers:
# NOTE: Backward compatible with old syntax using '|' as delimiter
for hdr in [x.strip() for x in header.split('|')]:
try:
- h_key, h_val = hdr.split('=')
+ h_key, h_val = hdr.split('=', 1)
h_val = to_native(Header(h_val, charset))
msg.add_header(h_key, h_val)
except Exception:
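
Two behavioural fixes appear above: the Message-ID now gets a configurable domain via make_msgid(domain=...), with a fallback for Python 2 where make_msgid() takes no domain argument, and custom headers are split on the first '=' only, so header values may themselves contain '='. A minimal self-contained sketch of both, outside the module:

    # Sketch of the two changes above, runnable on its own.
    from email.utils import make_msgid

    def build_message_id(domain='ansible'):
        try:
            return make_msgid(domain=domain)   # Python 3
        except TypeError:
            return make_msgid()                # Python 2: no 'domain' keyword

    def parse_custom_header(raw):
        # Split on the first '=' only, so the value may itself contain '='.
        # 'X-Note=a=b' -> ('X-Note', 'a=b')
        key, value = raw.split('=', 1)
        return key, value
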
@@ -382,7 +398,7 @@ def main():
part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
msg.attach(part)
- # NOTE: Backware compatibility with old syntax using space as delimiter is not retained
+ # NOTE: Backward compatibility with old syntax using space as delimiter is not retained
# This breaks files with spaces in it :-(
for filename in attach_files:
try:
diff --git a/ansible_collections/community/general/plugins/modules/make.py b/ansible_collections/community/general/plugins/modules/make.py
index ebff6cfe1..39392afca 100644
--- a/ansible_collections/community/general/plugins/modules/make.py
+++ b/ansible_collections/community/general/plugins/modules/make.py
@@ -49,12 +49,22 @@ options:
params:
description:
- Any extra parameters to pass to make.
+ - If the value is empty, only the key will be used. For example, V(FOO:) will produce V(FOO), not V(FOO=).
type: dict
target:
description:
- The target to run.
- - Typically this would be something like C(install), C(test), or C(all).
+ - Typically this would be something like V(install), V(test), or V(all).
+ - O(target) and O(targets) are mutually exclusive.
type: str
+ targets:
+ description:
+ - The list of targets to run.
+ - Typically this would be something like V(install), V(test), or V(all).
+ - O(target) and O(targets) are mutually exclusive.
+ type: list
+ elements: str
+ version_added: 7.2.0
'''
EXAMPLES = r'''
@@ -81,12 +91,24 @@ EXAMPLES = r'''
chdir: /home/ubuntu/cool-project
target: all
file: /some-project/Makefile
+
+- name: build arm64 kernel on FreeBSD, with 16 parallel jobs
+ community.general.make:
+ chdir: /usr/src
+ jobs: 16
+ target: buildkernel
+ params:
+ # This adds -DWITH_FDT to the command line:
+ -DWITH_FDT:
+ # The following adds TARGET=arm64 TARGET_ARCH=aarch64 to the command line:
+ TARGET: arm64
+ TARGET_ARCH: aarch64
'''
RETURN = r'''
chdir:
description:
- - The value of the module parameter I(chdir).
+ - The value of the module parameter O(chdir).
type: str
returned: success
command:
@@ -97,24 +119,30 @@ command:
version_added: 6.5.0
file:
description:
- - The value of the module parameter I(file).
+ - The value of the module parameter O(file).
type: str
returned: success
jobs:
description:
- - The value of the module parameter I(jobs).
+ - The value of the module parameter O(jobs).
type: int
returned: success
params:
description:
- - The value of the module parameter I(params).
+ - The value of the module parameter O(params).
type: dict
returned: success
target:
description:
- - The value of the module parameter I(target).
+ - The value of the module parameter O(target).
+ type: str
+ returned: success
+targets:
+ description:
+ - The value of the module parameter O(targets).
type: str
returned: success
+ version_added: 7.2.0
'''
from ansible.module_utils.six import iteritems
@@ -155,12 +183,14 @@ def main():
module = AnsibleModule(
argument_spec=dict(
target=dict(type='str'),
+ targets=dict(type='list', elements='str'),
params=dict(type='dict'),
chdir=dict(type='path', required=True),
file=dict(type='path'),
make=dict(type='path'),
jobs=dict(type='int'),
),
+ mutually_exclusive=[('target', 'targets')],
supports_check_mode=True,
)
@@ -172,9 +202,8 @@ def main():
if not make_path:
# Fall back to system make
make_path = module.get_bin_path('make', required=True)
- make_target = module.params['target']
if module.params['params'] is not None:
- make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ make_parameters = [k + (('=' + str(v)) if v is not None else '') for k, v in iteritems(module.params['params'])]
else:
make_parameters = []
@@ -188,7 +217,10 @@ def main():
base_command.extend(["-f", module.params['file']])
# add make target
- base_command.append(make_target)
+ if module.params['target']:
+ base_command.append(module.params['target'])
+ elif module.params['targets']:
+ base_command.extend(module.params['targets'])
# add makefile parameters
base_command.extend(make_parameters)
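
The hunks above change two things in how the make command line is assembled: a parameter whose value is omitted (None) is now passed as a bare KEY instead of KEY=, and either a single target or a list of targets can be appended. A rough sketch of the resulting command assembly; the function and the --jobs flag handling are illustrative, not the module's exact code:

    # Sketch: assemble a make command line as described above.
    def build_make_command(make_path, target=None, targets=None, params=None,
                           file=None, jobs=None):
        cmd = [make_path]
        if jobs:
            cmd.append('--jobs=%d' % jobs)   # illustrative flag spelling
        if file:
            cmd.extend(['-f', file])
        if target:
            cmd.append(target)
        elif targets:
            cmd.extend(targets)
        for key, value in (params or {}).items():
            cmd.append(key if value is None else '%s=%s' % (key, value))
        return cmd

    # build_make_command('/usr/bin/make', target='buildkernel', jobs=16,
    #                    params={'-DWITH_FDT': None, 'TARGET': 'arm64'})
    # -> ['/usr/bin/make', '--jobs=16', 'buildkernel', '-DWITH_FDT', 'TARGET=arm64']
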
@@ -206,8 +238,7 @@ def main():
changed = False
else:
# The target isn't up to date, so we need to run it
- rc, out, err = run_command(base_command, module,
- check_rc=True)
+ rc, out, err = run_command(base_command, module, check_rc=True)
changed = True
# We don't report the return code, as if this module failed
@@ -221,6 +252,7 @@ def main():
stdout=out,
stderr=err,
target=module.params['target'],
+ targets=module.params['targets'],
params=module.params['params'],
chdir=module.params['chdir'],
file=module.params['file'],
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py b/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
index c6cefad6a..eb6424bcd 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
@@ -72,7 +72,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Delete an alert profile from ManageIQ
community.general.manageiq_alert_profiles:
@@ -82,7 +82,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
'''
RETURN = '''
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alerts.py b/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
index 518b29f1f..53f40fb00 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
@@ -91,7 +91,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Add an alert with a "miq expression" to ManageIQ
community.general.manageiq_alerts:
@@ -118,7 +118,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Delete an alert from ManageIQ
community.general.manageiq_alerts:
@@ -128,7 +128,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
'''
RETURN = '''
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_group.py b/ansible_collections/community/general/plugins/modules/manageiq_group.py
index a142a939f..e060b9a01 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_group.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_group.py
@@ -52,7 +52,7 @@ options:
type: str
description:
- The group role name
- - The C(role_id) has precedence over the C(role) when supplied.
+ - The O(role_id) has precedence over the O(role) when supplied.
required: false
default: null
tenant_id:
@@ -65,7 +65,7 @@ options:
type: str
description:
- The tenant for the group identified by the tenant name.
- - The C(tenant_id) has precedence over the C(tenant) when supplied.
+ - The O(tenant_id) has precedence over the O(tenant) when supplied.
- Tenant names are case sensitive.
required: false
default: null
@@ -78,7 +78,7 @@ options:
type: str
description:
- In merge mode existing categories are kept or updated, new categories are added.
- - In replace mode all categories will be replaced with the supplied C(managed_filters).
+ - In replace mode all categories will be replaced with the supplied O(managed_filters).
choices: [ merge, replace ]
default: replace
belongsto_filters:
@@ -90,8 +90,8 @@ options:
belongsto_filters_merge_mode:
type: str
description:
- - In merge mode existing settings are merged with the supplied C(belongsto_filters).
- - In replace mode current values are replaced with the supplied C(belongsto_filters).
+ - In merge mode existing settings are merged with the supplied O(belongsto_filters).
+ - In replace mode current values are replaced with the supplied O(belongsto_filters).
choices: [ merge, replace ]
default: replace
'''
@@ -103,10 +103,10 @@ EXAMPLES = '''
role: 'EvmRole-user'
tenant: 'my_tenant'
manageiq_connection:
- url: 'https://manageiq_server'
+ url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
community.general.manageiq_group:
@@ -114,10 +114,10 @@ EXAMPLES = '''
role: 'EvmRole-user'
tenant_id: 4
manageiq_connection:
- url: 'https://manageiq_server'
+ url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name:
- Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
@@ -140,10 +140,10 @@ EXAMPLES = '''
- "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
belongsto_filters_merge_mode: merge
manageiq_connection:
- url: 'https://manageiq_server'
+ url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Delete a group in ManageIQ
community.general.manageiq_group:
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_policies.py b/ansible_collections/community/general/plugins/modules/manageiq_policies.py
index 061168f7f..f2101ad28 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_policies.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_policies.py
@@ -32,20 +32,16 @@ options:
state:
type: str
description:
- - C(absent) - policy_profiles should not exist,
- - C(present) - policy_profiles should exist,
- - >
- C(list) - list current policy_profiles and policies.
- This state is deprecated and will be removed 8.0.0.
- Please use the module M(community.general.manageiq_policies_info) instead.
- choices: ['absent', 'present', 'list']
+ - V(absent) - policy_profiles should not exist,
+ - V(present) - policy_profiles should exist,
+ choices: ['absent', 'present']
default: 'present'
policy_profiles:
type: list
elements: dict
description:
- - List of dictionaries, each includes the policy_profile C(name) key.
- - Required if I(state) is C(present) or C(absent).
+ - List of dictionaries, each includes the policy_profile V(name) key.
+ - Required if O(state) is V(present) or V(absent).
resource_type:
type: str
description:
@@ -58,12 +54,12 @@ options:
type: str
description:
- The name of the resource to which the profile should be [un]assigned.
- - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
type: int
description:
- The ID of the resource to which the profile should be [un]assigned.
- - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
version_added: 2.2.0
'''
@@ -78,7 +74,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Unassign a policy_profile for a provider in ManageIQ
community.general.manageiq_policies:
@@ -91,18 +87,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
-
-- name: List current policy_profile and policies for a provider in ManageIQ
- community.general.manageiq_policies:
- state: list
- resource_name: 'EngLab'
- resource_type: 'provider'
- manageiq_connection:
- url: 'http://127.0.0.1:3000'
- username: 'admin'
- password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
'''
RETURN = '''
@@ -144,7 +129,7 @@ from ansible_collections.community.general.plugins.module_utils.manageiq import
def main():
- actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ actions = {'present': 'assign', 'absent': 'unassign'}
argument_spec = dict(
policy_profiles=dict(type='list', elements='dict'),
resource_id=dict(type='int'),
@@ -152,7 +137,7 @@ def main():
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
state=dict(required=False, type='str',
- choices=['present', 'absent', 'list'], default='present'),
+ choices=['present', 'absent'], default='present'),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
@@ -173,13 +158,6 @@ def main():
resource_name = module.params['resource_name']
state = module.params['state']
- if state == "list":
- module.deprecate(
- 'The value "list" for "state" is deprecated. Please use community.general.manageiq_policies_info instead.',
- version='8.0.0',
- collection_name='community.general'
- )
-
# get the action and resource type
action = actions[state]
resource_type = manageiq_entities()[resource_type_key]
@@ -187,13 +165,8 @@ def main():
manageiq = ManageIQ(module)
manageiq_policies = manageiq.policies(resource_id, resource_type, resource_name)
- if action == 'list':
- # return a list of current profiles for this object
- current_profiles = manageiq_policies.query_resource_profiles()
- res_args = dict(changed=False, profiles=current_profiles)
- else:
- # assign or unassign the profiles
- res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
+ # assign or unassign the profiles
+ res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
module.exit_json(**res_args)
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py b/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py
index 8a75ef646..fda7dcadf 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py
@@ -38,12 +38,12 @@ options:
type: str
description:
- The name of the resource to obtain the profile for.
- - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
type: int
description:
- The ID of the resource to obtain the profile for.
- - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_provider.py b/ansible_collections/community/general/plugins/modules/manageiq_provider.py
index bbc27214b..e6ded9ea7 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_provider.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_provider.py
@@ -75,6 +75,7 @@ options:
provider:
description: Default endpoint connection information, required if state is true.
+ type: dict
suboptions:
hostname:
type: str
@@ -104,9 +105,30 @@ options:
certificate_authority:
type: str
description: The CA bundle string with custom certificates. defaults to None.
+ path:
+ type: str
+ description:
+ - TODO needs documentation.
+ project:
+ type: str
+ description:
+ - TODO needs documentation.
+ role:
+ type: str
+ description:
+ - TODO needs documentation.
+ subscription:
+ type: str
+ description:
+ - TODO needs documentation.
+ uid_ems:
+ type: str
+ description:
+ - TODO needs documentation.
metrics:
description: Metrics endpoint connection information.
+ type: dict
suboptions:
hostname:
type: str
@@ -138,10 +160,27 @@ options:
description: The CA bundle string with custom certificates. defaults to None.
path:
type: str
- description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history).
+ description: Database name for oVirt metrics. Defaults to V(ovirt_engine_history).
+ project:
+ type: str
+ description:
+ - TODO needs documentation.
+ role:
+ type: str
+ description:
+ - TODO needs documentation.
+ subscription:
+ type: str
+ description:
+ - TODO needs documentation.
+ uid_ems:
+ type: str
+ description:
+ - TODO needs documentation.
alerts:
description: Alerts endpoint connection information.
+ type: dict
suboptions:
hostname:
type: str
@@ -171,9 +210,30 @@ options:
certificate_authority:
type: str
description: The CA bundle string with custom certificates. defaults to None.
+ path:
+ type: str
+ description:
+ - TODO needs documentation.
+ project:
+ type: str
+ description:
+ - TODO needs documentation.
+ role:
+ type: str
+ description:
+ - TODO needs documentation.
+ subscription:
+ type: str
+ description:
+ - TODO needs documentation.
+ uid_ems:
+ type: str
+ description:
+ - TODO needs documentation.
ssh_keypair:
description: SSH key pair used for SSH connections to all hosts in this provider.
+ type: dict
suboptions:
hostname:
type: str
@@ -191,6 +251,43 @@ options:
type: bool
default: true
aliases: [ verify_ssl ]
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
+ description:
+ - TODO needs documentation.
+ certificate_authority:
+ type: str
+ description:
+ - TODO needs documentation.
+ password:
+ type: str
+ description:
+ - TODO needs documentation.
+ path:
+ type: str
+ description:
+ - TODO needs documentation.
+ project:
+ type: str
+ description:
+ - TODO needs documentation.
+ role:
+ type: str
+ description:
+ - TODO needs documentation.
+ subscription:
+ type: str
+ description:
+ - TODO needs documentation.
+ uid_ems:
+ type: str
+ description:
+ - TODO needs documentation.
+ port:
+ type: int
+ description:
+ - TODO needs documentation.
'''
EXAMPLES = '''
@@ -438,7 +535,7 @@ EXAMPLES = '''
url: 'https://cf-6af0.rhpds.opentlc.com'
username: 'admin'
password: 'password'
- validate_certs: false
+ validate_certs: true
- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
community.general.manageiq_provider:
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tags.py b/ansible_collections/community/general/plugins/modules/manageiq_tags.py
index 7e190d49c..3ab5eca4f 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_tags.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_tags.py
@@ -32,17 +32,16 @@ options:
state:
type: str
description:
- - C(absent) - tags should not exist.
- - C(present) - tags should exist.
- - C(list) - list current tags.
- choices: ['absent', 'present', 'list']
+ - V(absent) - tags should not exist.
+ - V(present) - tags should exist.
+ choices: ['absent', 'present']
default: 'present'
tags:
type: list
elements: dict
description:
- - C(tags) - list of dictionaries, each includes C(name) and c(category) keys.
- - Required if I(state) is C(present) or C(absent).
+ - V(tags) - list of dictionaries, each includes C(name) and C(category) keys.
+ - Required if O(state) is V(present) or V(absent).
resource_type:
type: str
description:
@@ -55,11 +54,11 @@ options:
type: str
description:
- The name of the resource at which tags will be controlled.
- - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
description:
- The ID of the resource at which tags will be controlled.
- - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
type: int
version_added: 2.2.0
'''
@@ -78,7 +77,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when connecting to localhost!
- name: Create new tags for a provider in ManageIQ.
community.general.manageiq_tags:
@@ -93,7 +92,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when connecting to localhost!
- name: Remove tags for a provider in ManageIQ.
community.general.manageiq_tags:
@@ -109,18 +108,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
-
-- name: List current tags for a provider in ManageIQ.
- community.general.manageiq_tags:
- state: list
- resource_name: 'EngLab'
- resource_type: 'provider'
- manageiq_connection:
- url: 'http://127.0.0.1:3000'
- username: 'admin'
- password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when connecting to localhost!
'''
RETURN = '''
@@ -133,7 +121,7 @@ from ansible_collections.community.general.plugins.module_utils.manageiq import
def main():
- actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ actions = {'present': 'assign', 'absent': 'unassign'}
argument_spec = dict(
tags=dict(type='list', elements='dict'),
resource_id=dict(type='int'),
@@ -141,7 +129,7 @@ def main():
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
state=dict(required=False, type='str',
- choices=['present', 'absent', 'list'], default='present'),
+ choices=['present', 'absent'], default='present'),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
@@ -174,13 +162,8 @@ def main():
manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
- if action == 'list':
- # return a list of current tags for this object
- current_tags = manageiq_tags.query_resource_tags()
- res_args = dict(changed=False, tags=current_tags)
- else:
- # assign or unassign the tags
- res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
+ # assign or unassign the tags
+ res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
module.exit_json(**res_args)
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py b/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py
index af71e150c..75e111540 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py
@@ -36,11 +36,11 @@ options:
type: str
description:
- The name of the resource at which tags will be controlled.
- - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
description:
- The ID of the resource at which tags will be controlled.
- - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+ - Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
type: int
'''
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tenant.py b/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
index d68e26a73..a5a56191e 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
@@ -50,13 +50,13 @@ options:
type: int
description:
- The id of the parent tenant. If not supplied the root tenant is used.
- - The C(parent_id) takes president over C(parent) when supplied
+ - The O(parent_id) takes president over O(parent) when supplied
required: false
default: null
parent:
type: str
description:
- - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
+ - The name of the parent tenant. If not supplied and no O(parent_id) is supplied the root tenant is used.
required: false
default: null
quotas:
@@ -83,7 +83,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Create a tenant in ManageIQ
community.general.manageiq_tenant:
@@ -94,7 +94,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Delete a tenant in ManageIQ
community.general.manageiq_tenant:
@@ -105,7 +105,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
community.general.manageiq_tenant:
@@ -119,7 +119,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Delete a tenant in ManageIQ using a token
@@ -130,7 +130,7 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
'''
RETURN = '''
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_user.py b/ansible_collections/community/general/plugins/modules/manageiq_user.py
index 0d3d8718b..0d8a81984 100644
--- a/ansible_collections/community/general/plugins/modules/manageiq_user.py
+++ b/ansible_collections/community/general/plugins/modules/manageiq_user.py
@@ -60,7 +60,7 @@ options:
default: always
choices: ['always', 'on_create']
description:
- - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
+ - V(always) will update passwords unconditionally. V(on_create) will only set the password for a newly created user.
'''
EXAMPLES = '''
@@ -75,7 +75,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Create a new user in ManageIQ using a token
community.general.manageiq_user:
@@ -87,7 +87,7 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Delete a user in ManageIQ
community.general.manageiq_user:
@@ -97,7 +97,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Delete a user in ManageIQ using a token
community.general.manageiq_user:
@@ -106,7 +106,7 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Update email of user in ManageIQ
community.general.manageiq_user:
@@ -116,7 +116,7 @@ EXAMPLES = '''
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
- name: Update email of user in ManageIQ using a token
community.general.manageiq_user:
@@ -125,7 +125,7 @@ EXAMPLES = '''
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
- validate_certs: false
+ validate_certs: false # only do this when you trust the network!
'''
RETURN = '''
diff --git a/ansible_collections/community/general/plugins/modules/mas.py b/ansible_collections/community/general/plugins/modules/mas.py
index 5b8958beb..8bb80840c 100644
--- a/ansible_collections/community/general/plugins/modules/mas.py
+++ b/ansible_collections/community/general/plugins/modules/mas.py
@@ -36,7 +36,7 @@ options:
state:
description:
- Desired state of the app installation.
- - The C(absent) value requires root permissions, also see the examples.
+ - The V(absent) value requires root permissions, also see the examples.
type: str
choices:
- absent
@@ -53,6 +53,8 @@ requirements:
- macOS 10.11+
- "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
+ - The feature of "checking if user is signed in" is disabled for anyone using macOS 12.0+.
+ - Users need to sign in via the Mac App Store GUI beforehand for anyone using macOS 12.0+ due to U(https://github.com/mas-cli/mas/issues/417).
'''
EXAMPLES = '''
@@ -106,6 +108,9 @@ import os
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+import platform
+NOT_WORKING_MAC_VERSION_MAS_ACCOUNT = '12.0'
+
class Mas(object):
@@ -115,6 +120,7 @@ class Mas(object):
# Initialize data properties
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
+ self._mac_version = platform.mac_ver()[0] or '0.0'
self._installed = None # Populated only if needed
self._outdated = None # Populated only if needed
self.count_install = 0
@@ -156,14 +162,16 @@ class Mas(object):
def check_signin(self):
''' Verifies that the user is signed in to the Mac App Store '''
-
# Only check this once per execution
if self._checked_signin:
return
-
- rc, out, err = self.run(['account'])
- if out.split("\n", 1)[0].rstrip() == 'Not signed in':
- self.module.fail_json(msg='You must be signed in to the Mac App Store')
+ if LooseVersion(self._mac_version) >= LooseVersion(NOT_WORKING_MAC_VERSION_MAS_ACCOUNT):
+ # Checking if user is signed-in is disabled due to https://github.com/mas-cli/mas/issues/417
+ self.module.log('WARNING: You must be signed in via the Mac App Store GUI beforehand, otherwise an error will occur')
+ else:
+ rc, out, err = self.run(['account'])
+ if out.split("\n", 1)[0].rstrip() == 'Not signed in':
+ self.module.fail_json(msg='You must be signed in to the Mac App Store')
self._checked_signin = True
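
The change above skips the C(mas account) sign-in check on macOS 12.0 and later, where it is broken (mas-cli issue 417), by comparing the host version against a threshold. A small sketch of that version gate using a plain tuple comparison instead of the module's LooseVersion helper (an assumption made here to keep the snippet self-contained):

    # Sketch: decide whether the 'mas account' sign-in check can be used on this host.
    import platform

    BROKEN_SINCE = (12, 0)

    def can_check_signin():
        raw = platform.mac_ver()[0] or '0'
        version = tuple(int(part) for part in raw.split('.')[:2] if part.isdigit())
        return version < BROKEN_SINCE
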
diff --git a/ansible_collections/community/general/plugins/modules/mattermost.py b/ansible_collections/community/general/plugins/modules/mattermost.py
index 29894c3a7..154040a8f 100644
--- a/ansible_collections/community/general/plugins/modules/mattermost.py
+++ b/ansible_collections/community/general/plugins/modules/mattermost.py
@@ -39,26 +39,26 @@ options:
description:
- Mattermost webhook api key. Log into your mattermost site, go to
Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
- This will give you full URL. api_key is the last part.
+ This will give you the full URL. O(api_key) is the last part.
http://mattermost.example.com/hooks/C(API_KEY)
required: true
text:
type: str
description:
- Text to send. Note that the module does not handle escaping characters.
- - Required when I(attachments) is not set.
+ - Required when O(attachments) is not set.
attachments:
type: list
elements: dict
description:
- Define a list of attachments.
- For more information, see U(https://developers.mattermost.com/integrate/admin-guide/admin-message-attachments/).
- - Required when I(text) is not set.
+ - Required when O(text) is not set.
version_added: 4.3.0
channel:
type: str
description:
- - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+ - Channel to send the message to. If absent, the message goes to the channel selected for the O(api_key).
username:
type: str
description:
@@ -71,7 +71,7 @@ options:
default: https://docs.ansible.com/favicon.ico
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: true
type: bool
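
The O(api_key) description above says the key is the trailing segment of the incoming-webhook URL that Mattermost displays. A one-line sketch for extracting it, purely illustrative:

    # Sketch: pull the api_key value out of a full incoming-webhook URL.
    def api_key_from_webhook_url(url):
        # 'http://mattermost.example.com/hooks/abcdefg' -> 'abcdefg'
        return url.rstrip('/').rsplit('/', 1)[-1]
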
diff --git a/ansible_collections/community/general/plugins/modules/maven_artifact.py b/ansible_collections/community/general/plugins/modules/maven_artifact.py
index 3f9defa52..0dc020c37 100644
--- a/ansible_collections/community/general/plugins/modules/maven_artifact.py
+++ b/ansible_collections/community/general/plugins/modules/maven_artifact.py
@@ -43,14 +43,14 @@ options:
type: str
description:
- The maven version coordinate
- - Mutually exclusive with I(version_by_spec).
+ - Mutually exclusive with O(version_by_spec).
version_by_spec:
type: str
description:
- The maven dependency version ranges.
- See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
- The range type "(,1.0],[1.2,)" and "(,1.1),(1.1,)" is not supported.
- - Mutually exclusive with I(version).
+ - Mutually exclusive with O(version).
version_added: '0.2.0'
classifier:
type: str
@@ -111,48 +111,48 @@ options:
default: 10
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be set to C(false) when no other option exists.
+ - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists.
type: bool
default: true
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- - This file can also include the key as well, and if the key is included, I(client_key) is not required.
+ - This file can also include the key as well, and if the key is included, O(client_key) is not required.
type: path
version_added: '1.3.0'
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- - If I(client_cert) contains both the certificate and key, this option is not required.
+ - If O(client_cert) contains both the certificate and key, this option is not required.
type: path
version_added: '1.3.0'
keep_name:
description:
- - If C(true), the downloaded artifact's name is preserved, i.e the version number remains part of it.
- - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+ - If V(true), the downloaded artifact's name is preserved, i.e the version number remains part of it.
+ - This option only has effect when O(dest) is a directory and O(version) is set to V(latest) or O(version_by_spec)
is defined.
type: bool
default: false
verify_checksum:
type: str
description:
- - If C(never), the MD5/SHA1 checksum will never be downloaded and verified.
- - If C(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default.
- - If C(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist,
+ - If V(never), the MD5/SHA1 checksum will never be downloaded and verified.
+ - If V(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default.
+ - If V(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exists,
to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe)
downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error
if the artifact has not been cached yet, it may fail unexpectedly.
- If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+ If you still need it, you should consider using V(always) instead - if you deal with a checksum, it is better to
use it to verify integrity after download.
- - C(always) combines C(download) and C(change).
+ - V(always) combines V(download) and V(change).
required: false
default: 'download'
choices: ['never', 'download', 'change', 'always']
checksum_alg:
type: str
description:
- - If C(md5), checksums will use the MD5 algorithm. This is the default.
- - If C(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use
+ - If V(md5), checksums will use the MD5 algorithm. This is the default.
+ - If V(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use
FIPS-compliant algorithms, since MD5 will be blocked on such systems.
default: 'md5'
choices: ['md5', 'sha1']
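
The O(verify_checksum) values documented in the hunk above encode when the MD5/SHA1 checksum is fetched and compared: V(never) disables it, V(download) verifies after fetching the artifact, V(change) verifies an already-present destination before downloading, and V(always) combines the two. A hedged sketch of that decision table, not the module's code:

    # Sketch: translate the verify_checksum mode into the two checks described above.
    def checksum_checks(mode):
        """Return (verify_before_download, verify_after_download)."""
        return {
            'never':    (False, False),
            'download': (False, True),
            'change':   (True,  False),
            'always':   (True,  True),
        }[mode]
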
@@ -162,14 +162,14 @@ options:
elements: str
version_added: 5.2.0
description:
- - A list of headers that should not be included in the redirection. This headers are sent to the fetch_url C(fetch_url) function.
- - On ansible-core version 2.12 or later, the default of this option is C([Authorization, Cookie]).
+ - A list of headers that should not be included in the redirection. This headers are sent to the C(fetch_url) function.
+ - On ansible-core version 2.12 or later, the default of this option is V([Authorization, Cookie]).
- Useful if the redirection URL does not need to have sensitive headers in the request.
- Requires ansible-core version 2.12 or later.
directory_mode:
type: str
description:
- - Filesystem permission mode applied recursively to I(dest) when it is a directory.
+ - Filesystem permission mode applied recursively to O(dest) when it is a directory.
extends_documentation_fragment:
- ansible.builtin.files
- community.general.attributes
diff --git a/ansible_collections/community/general/plugins/modules/memset_dns_reload.py b/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
index a1168724f..668c8c0bf 100644
--- a/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
+++ b/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
@@ -18,8 +18,8 @@ notes:
happen every 15 minutes by default, however you can request an immediate reload if
later tasks rely on the records being created. An API key generated via the
Memset customer control panel is required with the following minimum scope -
- I(dns.reload). If you wish to poll the job status to wait until the reload has
- completed, then I(job.status) is also required.
+ C(dns.reload). If you wish to poll the job status to wait until the reload has
+ completed, then C(job.status) is also required.
description:
- Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
extends_documentation_fragment:
diff --git a/ansible_collections/community/general/plugins/modules/memset_memstore_info.py b/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
index 5fc9d79e1..c00ef15eb 100644
--- a/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
+++ b/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
@@ -15,10 +15,9 @@ author: "Simon Weald (@glitchcrab)"
short_description: Retrieve Memstore product usage information
notes:
- An API key generated via the Memset customer control panel is needed with the
- following minimum scope - I(memstore.usage).
+ following minimum scope - C(memstore.usage).
description:
- Retrieve Memstore product usage information.
- - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -36,7 +35,7 @@ options:
required: true
type: str
description:
- - The Memstore product name (i.e. C(mstestyaa1)).
+ - The Memstore product name (that is, C(mstestyaa1)).
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/memset_server_info.py b/ansible_collections/community/general/plugins/modules/memset_server_info.py
index ecc0375eb..78ea99df3 100644
--- a/ansible_collections/community/general/plugins/modules/memset_server_info.py
+++ b/ansible_collections/community/general/plugins/modules/memset_server_info.py
@@ -15,10 +15,9 @@ author: "Simon Weald (@glitchcrab)"
short_description: Retrieve server information
notes:
- An API key generated via the Memset customer control panel is needed with the
- following minimum scope - I(server.info).
+ following minimum scope - C(server.info).
description:
- Retrieve server information.
- - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -36,7 +35,7 @@ options:
required: true
type: str
description:
- - The server product name (i.e. C(testyaa1)).
+ - The server product name (that is, C(testyaa1)).
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/memset_zone.py b/ansible_collections/community/general/plugins/modules/memset_zone.py
index e17472e39..f520d5446 100644
--- a/ansible_collections/community/general/plugins/modules/memset_zone.py
+++ b/ansible_collections/community/general/plugins/modules/memset_zone.py
@@ -17,7 +17,7 @@ notes:
- Zones can be thought of as a logical group of domains, all of which share the
same DNS records (i.e. they point to the same IP). An API key generated via the
Memset customer control panel is needed with the following minimum scope -
- I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+ C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list).
description:
- Manage DNS zones in a Memset account.
extends_documentation_fragment:
diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_domain.py b/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
index 172a48be2..e07ac1ff0 100644
--- a/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
+++ b/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
@@ -17,9 +17,9 @@ notes:
- Zone domains can be thought of as a collection of domains, all of which share the
same DNS records (i.e. they point to the same IP). An API key generated via the
Memset customer control panel is needed with the following minimum scope -
- I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
+ C(dns.zone_domain_create), C(dns.zone_domain_delete), C(dns.zone_domain_list).
- Currently this module can only create one domain at a time. Multiple domains should
- be created using C(with_items).
+ be created using C(loop).
description:
- Manage DNS zone domains in a Memset account.
extends_documentation_fragment:
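
A short sketch of the loop-based usage mentioned in the note above; the api_key, domain, and zone option names are assumed from the rest of the module documentation and are not shown in this hunk:

    - name: Add several domains to a zone, one at a time
      community.general.memset_zone_domain:
        api_key: "{{ memset_api_key }}"      # assumed option name
        domain: "{{ item }}"                 # assumed option name
        zone: zone-example                   # assumed option name, hypothetical zone
        state: present
      loop:
        - example.com
        - example.org
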
diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_record.py b/ansible_collections/community/general/plugins/modules/memset_zone_record.py
index 4e56a11ca..8406d93d2 100644
--- a/ansible_collections/community/general/plugins/modules/memset_zone_record.py
+++ b/ansible_collections/community/general/plugins/modules/memset_zone_record.py
@@ -17,9 +17,9 @@ notes:
- Zones can be thought of as a logical group of domains, all of which share the
same DNS records (i.e. they point to the same IP). An API key generated via the
Memset customer control panel is needed with the following minimum scope -
- I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+ C(dns.zone_create), C(dns.zone_delete), C(dns.zone_list).
- Currently this module can only create one DNS record at a time. Multiple records
- should be created using C(with_items).
+ should be created using C(loop).
description:
- Manage DNS records in a Memset account.
extends_documentation_fragment:
diff --git a/ansible_collections/community/general/plugins/modules/modprobe.py b/ansible_collections/community/general/plugins/modules/modprobe.py
index 6389d758d..f271b3946 100644
--- a/ansible_collections/community/general/plugins/modules/modprobe.py
+++ b/ansible_collections/community/general/plugins/modules/modprobe.py
@@ -49,14 +49,14 @@ options:
description:
- Persistency between reboots for configured module.
- This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots.
- - If C(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot.
- - If C(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be
+ - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot.
+ - If V(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be
loaded on next reboot.
- - If C(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
+ - If V(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
- Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded in the
kernel modules themselves instead of configuration like this.
- In fact, most modern kernel modules are prepared for automatic loading already.
- - "B(Note:) This option works only with distributions that use C(systemd) when set to values other than C(disabled)."
+ - "B(Note:) This option works only with distributions that use C(systemd) when set to values other than V(disabled)."
'''
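
A brief sketch of the persistency behaviour described above; the kernel module and its parameter are illustrative, and the name, params, and state options are assumed from the rest of the module documentation:

    - name: Load the dummy module with a parameter and make it persistent across reboots
      community.general.modprobe:
        name: dummy                          # hypothetical kernel module
        params: "numdummies=2"               # assumed option name, written to /etc/modprobe.d/
        state: present
        persistent: present                  # adds entries under /etc/modules-load.d/ and /etc/modprobe.d/
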
EXAMPLES = '''
@@ -232,12 +232,16 @@ class Modprobe(object):
@property
def modules_files(self):
+ if not os.path.isdir(MODULES_LOAD_LOCATION):
+ return []
modules_paths = [os.path.join(MODULES_LOAD_LOCATION, path)
for path in os.listdir(MODULES_LOAD_LOCATION)]
return [path for path in modules_paths if os.path.isfile(path)]
@property
def modprobe_files(self):
+ if not os.path.isdir(PARAMETERS_FILES_LOCATION):
+ return []
modules_paths = [os.path.join(PARAMETERS_FILES_LOCATION, path)
for path in os.listdir(PARAMETERS_FILES_LOCATION)]
return [path for path in modules_paths if os.path.isfile(path)]
diff --git a/ansible_collections/community/general/plugins/modules/monit.py b/ansible_collections/community/general/plugins/modules/monit.py
index d2a160678..5475ab1e5 100644
--- a/ansible_collections/community/general/plugins/modules/monit.py
+++ b/ansible_collections/community/general/plugins/modules/monit.py
@@ -14,7 +14,7 @@ DOCUMENTATION = '''
module: monit
short_description: Manage the state of a program monitored via Monit
description:
- - Manage the state of a program monitored via I(Monit).
+ - Manage the state of a program monitored via Monit.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -25,7 +25,7 @@ attributes:
options:
name:
description:
- - The name of the I(monit) program/process to manage.
+ - The name of the C(monit) program/process to manage.
required: true
type: str
state:
diff --git a/ansible_collections/community/general/plugins/modules/mqtt.py b/ansible_collections/community/general/plugins/modules/mqtt.py
index 389382649..f8d64e6a0 100644
--- a/ansible_collections/community/general/plugins/modules/mqtt.py
+++ b/ansible_collections/community/general/plugins/modules/mqtt.py
@@ -40,7 +40,7 @@ options:
password:
type: str
description:
- - Password for C(username) to authenticate against the broker.
+ - Password for O(username) to authenticate against the broker.
client_id:
type: str
description:
@@ -54,8 +54,8 @@ options:
payload:
type: str
description:
- - Payload. The special string C("None") may be used to send a NULL
- (i.e. empty) payload which is useful to simply notify with the I(topic)
+ - Payload. The special string V("None") may be used to send a NULL
+ (that is, empty) payload which is useful to simply notify with the O(topic)
or to clear previously retained messages.
required: true
qos:
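
A rough sketch of the V("None") payload behaviour described above; the server and retain option names are assumed from the rest of the module documentation and are not shown in this hunk:

    - name: Clear a previously retained message by sending a NULL payload
      community.general.mqtt:
        server: broker.example.com           # assumed option name, hypothetical broker
        topic: sensors/kitchen/temperature   # hypothetical topic
        payload: "None"                      # special string that sends an empty payload
        retain: true                         # assumed option name
        username: mqtt_user
        password: "{{ mqtt_password }}"
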
diff --git a/ansible_collections/community/general/plugins/modules/mssql_db.py b/ansible_collections/community/general/plugins/modules/mssql_db.py
index 4006033cf..a85f721fc 100644
--- a/ansible_collections/community/general/plugins/modules/mssql_db.py
+++ b/ansible_collections/community/general/plugins/modules/mssql_db.py
@@ -71,7 +71,6 @@ notes:
- Requires the pymssql Python package on the remote host. For Ubuntu, this
is as easy as pip install pymssql (See M(ansible.builtin.pip).)
requirements:
- - python >= 2.7
- pymssql
author: Vedit Firat Arig (@vedit)
'''
diff --git a/ansible_collections/community/general/plugins/modules/mssql_script.py b/ansible_collections/community/general/plugins/modules/mssql_script.py
index 1696000db..b1713092c 100644
--- a/ansible_collections/community/general/plugins/modules/mssql_script.py
+++ b/ansible_collections/community/general/plugins/modules/mssql_script.py
@@ -46,33 +46,41 @@ options:
type: str
required: true
login_port:
- description: Port of the MSSQL server. Requires I(login_host) be defined as well.
+ description: Port of the MSSQL server. Requires O(login_host) be defined as well.
default: 1433
type: int
script:
description:
- The SQL script to be executed.
- - Script can contain multiple SQL statements. Multiple Batches can be separated by C(GO) command.
+ - Script can contain multiple SQL statements. Multiple Batches can be separated by V(GO) command.
- Each batch must return at least one result set.
required: true
type: str
+ transaction:
+ description:
+      - If transactional mode is requested, start a transaction and commit the change only if the script succeeds.
+        Otherwise, roll back the transaction.
+ - If transactional mode is not requested (default), automatically commit the change.
+ type: bool
+ default: false
+ version_added: 8.4.0
output:
description:
- - With C(default) each row will be returned as a list of values. See C(query_results).
- - Output format C(dict) will return dictionary with the column names as keys. See C(query_results_dict).
- - C(dict) requires named columns to be returned by each query otherwise an error is thrown.
+ - With V(default) each row will be returned as a list of values. See RV(query_results).
+ - Output format V(dict) will return dictionary with the column names as keys. See RV(query_results_dict).
+      - V(dict) requires named columns to be returned by each query, otherwise an error is thrown.
choices: [ "dict", "default" ]
default: 'default'
type: str
params:
description: |
- Parameters passed to the script as SQL parameters. ('SELECT %(name)s"' with C(example: '{"name": "John Doe"}).)'
+ Parameters passed to the script as SQL parameters.
+ (Query V('SELECT %(name\)s"') with V(example: '{"name": "John Doe"}).)'
type: dict
notes:
- Requires the pymssql Python package on the remote host. For Ubuntu, this
is as easy as C(pip install pymssql) (See M(ansible.builtin.pip).)
requirements:
- - python >= 2.7
- pymssql
author:
@@ -105,6 +113,19 @@ EXAMPLES = r'''
- result_params.query_results[0][0][0][0] == 'msdb'
- result_params.query_results[0][0][0][1] == 'ONLINE'
+- name: Query within a transaction
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: |
+ UPDATE sys.SomeTable SET desc = 'some_table_desc' WHERE name = %(dbname)s
+ UPDATE sys.AnotherTable SET desc = 'another_table_desc' WHERE name = %(dbname)s
+ transaction: true
+ params:
+ dbname: msdb
+
- name: two batches with default output
community.general.mssql_script:
login_user: "{{ mssql_login_user }}"
@@ -148,17 +169,17 @@ EXAMPLES = r'''
RETURN = r'''
query_results:
- description: List of batches (queries separated by C(GO) keyword).
+ description: List of batches (queries separated by V(GO) keyword).
type: list
elements: list
- returned: success and I(output=default)
+ returned: success and O(output=default)
sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
contains:
queries:
description:
- List of result sets of each query.
- If a query returns no results, the results of this and all the following queries will not be included in the output.
- - Use the C(GO) keyword in I(script) to separate queries.
+ - Use the V(GO) keyword in O(script) to separate queries.
type: list
elements: list
contains:
@@ -175,10 +196,10 @@ query_results:
example: ["Batch 0 - Select 0"]
returned: success, if output is default
query_results_dict:
- description: List of batches (queries separated by C(GO) keyword).
+ description: List of batches (queries separated by V(GO) keyword).
type: list
elements: list
- returned: success and I(output=dict)
+ returned: success and O(output=dict)
sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
contains:
queries:
@@ -230,6 +251,7 @@ def run_module():
script=dict(required=True),
output=dict(default='default', choices=['dict', 'default']),
params=dict(type='dict'),
+ transaction=dict(type='bool', default=False),
)
result = dict(
@@ -252,6 +274,8 @@ def run_module():
script = module.params['script']
output = module.params['output']
sql_params = module.params['params']
+ # Added param to set the transactional mode (true/false)
+ transaction = module.params['transaction']
login_querystring = login_host
if login_port != 1433:
@@ -273,21 +297,40 @@ def run_module():
module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
"@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
- conn.autocommit(True)
+ # If transactional mode is requested, start a transaction
+ conn.autocommit(not transaction)
query_results_key = 'query_results'
if output == 'dict':
cursor = conn.cursor(as_dict=True)
query_results_key = 'query_results_dict'
- queries = script.split('\nGO\n')
+ # Process the script into batches
+ queries = []
+ current_batch = []
+ for statement in script.splitlines(True):
+ # Ignore the Byte Order Mark, if found
+ if statement.strip() == '\uFEFF':
+ continue
+
+ # Assume each 'GO' is on its own line but may have leading/trailing whitespace
+ # and be of mixed-case
+ if statement.strip().upper() != 'GO':
+ current_batch.append(statement)
+ else:
+ queries.append(''.join(current_batch))
+ current_batch = []
+ if len(current_batch) > 0:
+ queries.append(''.join(current_batch))
+
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
query_results = []
- try:
- for query in queries:
+ for query in queries:
+ # Catch and exit on any bad query errors
+ try:
cursor.execute(query, sql_params)
qry_result = []
rows = cursor.fetchall()
@@ -295,8 +338,24 @@ def run_module():
qry_result.append(rows)
rows = cursor.fetchall()
query_results.append(qry_result)
- except Exception as e:
- return module.fail_json(msg="query failed", query=query, error=str(e), **result)
+ except Exception as e:
+ # We know we executed the statement so this error just means we have no resultset
+            # which is ok (for example UPDATE/INSERT)
+ if (
+ type(e).__name__ == 'OperationalError' and
+ str(e) == 'Statement not executed or executed statement has no resultset'
+ ):
+ query_results.append([])
+ else:
+ # Rollback transaction before failing the module in case of error
+ if transaction:
+ conn.rollback()
+ error_msg = '%s: %s' % (type(e).__name__, str(e))
+ module.fail_json(msg="query failed", query=query, error=error_msg, **result)
+
+ # Commit transaction before exiting the module in case of no error
+ if transaction:
+ conn.commit()
# ensure that the result is json serializable
qry_results = json.loads(json.dumps(query_results, default=clean_output))
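
A small task sketch of the batch handling above: the V(GO) separator is matched case-insensitively on its own line, and with O(output=dict) each query must return named columns. Host and credential variables are placeholders:

    - name: Two batches separated by a mixed-case go line, returned as dictionaries
      community.general.mssql_script:
        login_user: "{{ mssql_login_user }}"
        login_password: "{{ mssql_login_password }}"
        login_host: "{{ mssql_host }}"
        login_port: "{{ mssql_port }}"
        output: dict
        script: |
          SELECT name AS db_name FROM sys.databases WHERE name = %(dbname)s
          go
          SELECT state_desc AS db_state FROM sys.databases WHERE name = %(dbname)s
        params:
          dbname: msdb
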
diff --git a/ansible_collections/community/general/plugins/modules/nagios.py b/ansible_collections/community/general/plugins/modules/nagios.py
index 1831d0496..783aa88e2 100644
--- a/ansible_collections/community/general/plugins/modules/nagios.py
+++ b/ansible_collections/community/general/plugins/modules/nagios.py
@@ -21,13 +21,13 @@ short_description: Perform common tasks in Nagios related to downtime and notifi
description:
- "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
- The C(nagios) module is not idempotent.
- - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+ - All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
to the host the playbook is currently running on.
- - You can specify multiple services at once by separating them with commas, .e.g. I(services=httpd,nfs,puppet).
- - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself),
- e.g., I(service=host). This keyword may not be given with other services at the same time.
- I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
- To schedule downtime for all services on particular host use keyword "all", e.g., I(service=all).
+  - You can specify multiple services at once by separating them with commas, for example O(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, O(host), which will handle alerts/downtime/acknowledge for the I(host itself),
+ for example O(services=host). This keyword may not be given with other services at the same time.
+ B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
+    To schedule downtime for all services on a particular host use keyword "all", for example O(services=all).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -41,7 +41,7 @@ options:
- Action to take.
- servicegroup options were added in 2.0.
- delete_downtime options were added in 2.2.
- - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0.
+ - The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0.
required: true
choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
"silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
@@ -59,12 +59,12 @@ options:
author:
description:
- Author to leave downtime comments as.
- Only used when I(action) is C(downtime) or C(acknowledge).
+ Only used when O(action) is V(downtime) or V(acknowledge).
type: str
default: Ansible
comment:
description:
- - Comment when I(action) is C(downtime) or C(acknowledge).
+ - Comment when O(action) is V(downtime) or V(acknowledge).
type: str
default: Scheduling downtime
start:
@@ -75,27 +75,24 @@ options:
minutes:
description:
- Minutes to schedule downtime for.
- - Only usable with the C(downtime) action.
+ - Only usable with O(action=downtime).
type: int
default: 30
services:
description:
- - >
- What to manage downtime/alerts for. Separate multiple services with commas.
- I(service) is an alias for I(services).
- B(Required) option when I(action) is one of: C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), C(disable_alerts).
+ - What to manage downtime/alerts for. Separate multiple services with commas.
+ - "B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts)."
aliases: [ "service" ]
type: str
servicegroup:
description:
- The Servicegroup we want to set downtimes/alerts for.
- B(Required) option when using the C(servicegroup_service_downtime) amd C(servicegroup_host_downtime).
+ - B(Required) option when using the V(servicegroup_service_downtime) and V(servicegroup_host_downtime) O(action).
type: str
command:
description:
- - The raw command to send to nagios, which
- should not include the submitted time header or the line-feed
- B(Required) option when using the C(command) action.
+ - The raw command to send to nagios, which should not include the submitted time header or the line-feed.
+ - B(Required) option when using the V(command) O(action).
type: str
author: "Tim Bielawa (@tbielawa)"
diff --git a/ansible_collections/community/general/plugins/modules/netcup_dns.py b/ansible_collections/community/general/plugins/modules/netcup_dns.py
index 77be50b2c..cba70c0fa 100644
--- a/ansible_collections/community/general/plugins/modules/netcup_dns.py
+++ b/ansible_collections/community/general/plugins/modules/netcup_dns.py
@@ -46,14 +46,17 @@ options:
type: str
record:
description:
- - Record to add or delete, supports wildcard (*). Default is C(@) (e.g. the zone name).
+ - Record to add or delete, supports wildcard (V(*)). Default is V(@) (that is, the zone name).
default: "@"
aliases: [ name ]
type: str
type:
description:
- Record type.
- choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
+ - Support for V(OPENPGPKEY), V(SMIMEA) and V(SSHFP) was added in community.general 8.1.0.
+ - Record types V(OPENPGPKEY) and V(SMIMEA) require nc-dnsapi >= 0.1.5.
+ - Record type V(SSHFP) requires nc-dnsapi >= 0.1.6.
+ choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA', 'SSHFP']
required: true
type: str
value:
@@ -65,11 +68,11 @@ options:
type: bool
default: false
description:
- - Whether the record should be the only one for that record type and record name. Only use with I(state=present).
+ - Whether the record should be the only one for that record type and record name. Only use with O(state=present).
- This will delete all other records with the same record name and type.
priority:
description:
- - Record priority. Required for I(type=MX).
+ - Record priority. Required for O(type=MX).
required: false
type: int
state:
@@ -169,7 +172,7 @@ records:
sample: fancy-hostname
type:
description: the record type
- returned: succcess
+ returned: success
type: str
sample: A
value:
@@ -213,7 +216,9 @@ def main():
domain=dict(required=True),
record=dict(required=False, default='@', aliases=['name']),
- type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
+ type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT',
+ 'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA',
+ 'SSHFP']),
value=dict(required=True),
priority=dict(required=False, type='int'),
solo=dict(required=False, type='bool', default=False),
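
A short sketch of a record type added in 8.1.0, assuming the module's usual customer_id/api_key/api_password authentication options (they are outside this hunk) and nc-dnsapi >= 0.1.6 for V(SSHFP):

    - name: Create an SSHFP record for a host in the zone
      community.general.netcup_dns:
        customer_id: "{{ netcup_customer_id }}"    # assumed option name
        api_key: "{{ netcup_api_key }}"            # assumed option name
        api_password: "{{ netcup_api_password }}"  # assumed option name
        domain: example.com
        record: www                                # defaults to V(@), the zone name
        type: SSHFP
        value: "1 1 123456789abcdef67890123456789abcdef67890123"  # hypothetical fingerprint
        state: present
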
diff --git a/ansible_collections/community/general/plugins/modules/newrelic_deployment.py b/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
index ac9903b57..e5a116082 100644
--- a/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
+++ b/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
@@ -32,14 +32,14 @@ options:
app_name:
type: str
description:
- - The value of app_name in the newrelic.yml file used by the application.
- - One of I(app_name) or I(application_id) is required.
+ - The value of C(app_name) in the C(newrelic.yml) file used by the application.
+ - One of O(app_name) or O(application_id) is required.
required: false
application_id:
type: str
description:
- The application ID found in the metadata of the application in APM.
- - One of I(app_name) or I(application_id) is required.
+ - One of O(app_name) or O(application_id) is required.
required: false
changelog:
type: str
@@ -61,25 +61,21 @@ options:
description:
- The name of the user/process that triggered this deployment
required: false
- appname:
- type: str
- description:
- - Name of the application.
- - This option has been deprecated and will be removed in community.general 7.0.0. Please do not use.
- required: false
- environment:
- type: str
- description:
- - The environment for this deployment.
- - This option has been deprecated and will be removed community.general 7.0.0. Please do not use.
- required: false
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: true
type: bool
+ app_name_exact_match:
+ type: bool
+ description:
+      - If this flag is set to V(true), the application ID lookup by name only works for an exact match.
+        If set to V(false), it returns the first result.
+ required: false
+ default: false
+ version_added: 7.5.0
requirements: []
'''
@@ -113,11 +109,11 @@ def main():
description=dict(required=False),
revision=dict(required=True),
user=dict(required=False),
- appname=dict(required=False, removed_in_version='7.0.0', removed_from_collection='community.general'),
- environment=dict(required=False, removed_in_version='7.0.0', removed_from_collection='community.general'),
validate_certs=dict(default=True, type='bool'),
+ app_name_exact_match=dict(required=False, type='bool', default=False),
),
required_one_of=[['app_name', 'application_id']],
+ required_if=[('app_name_exact_match', True, ['app_name'])],
supports_check_mode=True
)
@@ -125,7 +121,6 @@ def main():
params = {}
if module.params["app_name"] and module.params["application_id"]:
module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
-
app_id = None
if module.params["app_name"]:
app_id = get_application_id(module)
@@ -164,6 +159,7 @@ def main():
def get_application_id(module):
url = "https://api.newrelic.com/v2/applications.json"
data = "filter[name]=%s" % module.params["app_name"]
+ application_id = None
headers = {
'Api-Key': module.params["token"],
}
@@ -175,7 +171,17 @@ def get_application_id(module):
if result is None or len(result.get("applications", "")) == 0:
module.fail_json(msg='No application found with name "%s"' % module.params["app_name"])
- return result["applications"][0]["id"]
+ if module.params["app_name_exact_match"]:
+ for item in result["applications"]:
+ if item["name"] == module.params["app_name"]:
+ application_id = item["id"]
+ break
+ if application_id is None:
+ module.fail_json(msg='No application found with exact name "%s"' % module.params["app_name"])
+ else:
+ application_id = result["applications"][0]["id"]
+
+ return application_id
if __name__ == '__main__':
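
A minimal sketch of the exact-match lookup added above; the token and revision values are placeholders:

    - name: Notify New Relic about a deployment, matching the application name exactly
      community.general.newrelic_deployment:
        token: "{{ newrelic_api_key }}"
        app_name: my-application             # hypothetical application name
        app_name_exact_match: true           # requires app_name, see required_if above
        revision: "1.4.2"
        user: ansible deployment
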
diff --git a/ansible_collections/community/general/plugins/modules/nexmo.py b/ansible_collections/community/general/plugins/modules/nexmo.py
index 7461c1cb9..39f127f98 100644
--- a/ansible_collections/community/general/plugins/modules/nexmo.py
+++ b/ansible_collections/community/general/plugins/modules/nexmo.py
@@ -50,7 +50,7 @@ options:
required: true
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/nictagadm.py b/ansible_collections/community/general/plugins/modules/nictagadm.py
index 074e09b4a..5b81861e8 100644
--- a/ansible_collections/community/general/plugins/modules/nictagadm.py
+++ b/ansible_collections/community/general/plugins/modules/nictagadm.py
@@ -31,23 +31,23 @@ options:
type: str
mac:
description:
- - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
- - Parameters I(mac) and I(etherstub) are mutually exclusive.
+ - Specifies the O(mac) address to attach the nic tag to when not creating an O(etherstub).
+ - Parameters O(mac) and O(etherstub) are mutually exclusive.
type: str
etherstub:
description:
- - Specifies that the nic tag will be attached to a created I(etherstub).
- - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
+ - Specifies that the nic tag will be attached to a created O(etherstub).
+ - Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac).
type: bool
default: false
mtu:
description:
- - Specifies the size of the I(mtu) of the desired nic tag.
- - Parameters I(mtu) and I(etherstub) are mutually exclusive.
+ - Specifies the size of the O(mtu) of the desired nic tag.
+ - Parameters O(mtu) and O(etherstub) are mutually exclusive.
type: int
force:
description:
- - When I(state) is absent set this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
+ - When O(state=absent) this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
type: bool
default: false
state:
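
A sketch of the mutually exclusive tag options above; the name option and the V(present) state value are assumed from the rest of the module documentation, which is outside this hunk:

    - name: Create a nic tag bound to a physical NIC with a jumbo-frame MTU
      community.general.nictagadm:
        name: storage0                       # assumed option name, hypothetical tag
        mac: "00:11:22:33:44:55"             # mutually exclusive with etherstub
        mtu: 9000
        state: present

    - name: Create a nic tag on a new etherstub instead
      community.general.nictagadm:
        name: internal0                      # assumed option name, hypothetical tag
        etherstub: true                      # mutually exclusive with mac and mtu
        state: present
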
diff --git a/ansible_collections/community/general/plugins/modules/nmcli.py b/ansible_collections/community/general/plugins/modules/nmcli.py
index 08680bf6e..9360ce37d 100644
--- a/ansible_collections/community/general/plugins/modules/nmcli.py
+++ b/ansible_collections/community/general/plugins/modules/nmcli.py
@@ -52,23 +52,25 @@ options:
description:
- The interface to bind the connection to.
- The connection will only be applicable to this interface name.
- - A special value of C('*') can be used for interface-independent connections.
+ - A special value of V('*') can be used for interface-independent connections.
- The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn.
- - This parameter defaults to C(conn_name) when left unset for all connection types except vpn that removes it.
+ - This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it.
type: str
type:
description:
- This is the type of device or network connection that you wish to create or modify.
- - Type C(dummy) is added in community.general 3.5.0.
- - Type C(generic) is added in Ansible 2.5.
- - Type C(infiniband) is added in community.general 2.0.0.
- - Type C(gsm) is added in community.general 3.7.0.
- - Type C(macvlan) is added in community.general 6.6.0.
- - Type C(wireguard) is added in community.general 4.3.0.
- - Type C(vpn) is added in community.general 5.1.0.
+ - Type V(dummy) is added in community.general 3.5.0.
+ - Type V(gsm) is added in community.general 3.7.0.
+ - Type V(infiniband) is added in community.general 2.0.0.
+ - Type V(loopback) is added in community.general 8.1.0.
+ - Type V(macvlan) is added in community.general 6.6.0.
+ - Type V(wireguard) is added in community.general 4.3.0.
+ - Type V(vpn) is added in community.general 5.1.0.
+ - Using V(bond-slave), V(bridge-slave), or V(team-slave) implies V(ethernet) connection type with corresponding O(slave_type) option.
+      - If you want to control a non-ethernet connection attached to V(bond), V(bridge), or V(team), consider using the O(slave_type) option.
type: str
choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan,
- wifi, gsm, wireguard, vpn ]
+ wifi, gsm, wireguard, vpn, loopback ]
mode:
description:
- This is the type of device or network connection that you wish to create for a bond or bridge.
@@ -81,21 +83,28 @@ options:
type: str
choices: [ datagram, connected ]
version_added: 5.8.0
+ slave_type:
+ description:
+ - Type of the device of this slave's master connection (for example V(bond)).
+ type: str
+ choices: [ 'bond', 'bridge', 'team' ]
+ version_added: 7.0.0
master:
description:
- Master <master (ifname, or connection UUID or conn_name) of bridge, team, bond master connection profile.
+ - Mandatory if O(slave_type) is defined.
type: str
ip4:
description:
- List of IPv4 addresses to this interface.
- - Use the format C(192.0.2.24/24) or C(192.0.2.24).
- - If defined and I(method4) is not specified, automatically set C(ipv4.method) to C(manual).
+ - Use the format V(192.0.2.24/24) or V(192.0.2.24).
+ - If defined and O(method4) is not specified, automatically set C(ipv4.method) to V(manual).
type: list
elements: str
gw4:
description:
- The IPv4 gateway for this interface.
- - Use the format C(192.0.2.1).
+ - Use the format V(192.0.2.1).
- This parameter is mutually_exclusive with never_default4 parameter.
type: str
gw4_ignore_auto:
@@ -107,8 +116,8 @@ options:
routes4:
description:
- The list of IPv4 routes.
- - Use the format C(192.0.3.0/24 192.0.2.1).
- - To specify more complex routes, use the I(routes4_extended) option.
+ - Use the format V(192.0.3.0/24 192.0.2.1).
+ - To specify more complex routes, use the O(routes4_extended) option.
type: list
elements: str
version_added: 2.0.0
@@ -121,12 +130,12 @@ options:
ip:
description:
- IP or prefix of route.
- - Use the format C(192.0.3.0/24).
+ - Use the format V(192.0.3.0/24).
type: str
required: true
next_hop:
description:
- - Use the format C(192.0.2.1).
+ - Use the format V(192.0.2.1).
type: str
metric:
description:
@@ -160,7 +169,7 @@ options:
version_added: 2.0.0
routing_rules4:
description:
- - Is the same as in an C(ip route add) command, except always requires specifying a priority.
+ - Is the same as in an C(ip rule add) command, except always requires specifying a priority.
type: list
elements: str
version_added: 3.3.0
@@ -174,7 +183,7 @@ options:
dns4:
description:
- A list of up to 3 DNS servers.
- - The entries must be IPv4 addresses, for example C(192.0.2.53).
+ - The entries must be IPv4 addresses, for example V(192.0.2.53).
elements: str
type: list
dns4_search:
@@ -182,6 +191,12 @@ options:
- A list of DNS search domains.
elements: str
type: list
+ dns4_options:
+ description:
+ - A list of DNS options.
+ elements: str
+ type: list
+ version_added: 7.2.0
dns4_ignore_auto:
description:
- Ignore automatically configured IPv4 name servers.
@@ -191,28 +206,28 @@ options:
method4:
description:
- Configuration method to be used for IPv4.
- - If I(ip4) is set, C(ipv4.method) is automatically set to C(manual) and this parameter is not needed.
+ - If O(ip4) is set, C(ipv4.method) is automatically set to V(manual) and this parameter is not needed.
type: str
choices: [auto, link-local, manual, shared, disabled]
version_added: 2.2.0
may_fail4:
description:
- - If you need I(ip4) configured before C(network-online.target) is reached, set this option to C(false).
- - This option applies when C(method4) is not C(disabled).
+ - If you need O(ip4) configured before C(network-online.target) is reached, set this option to V(false).
+ - This option applies when O(method4) is not V(disabled).
type: bool
default: true
version_added: 3.3.0
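
A brief sketch pulling the IPv4 options above into one task; the connection and interface names are illustrative, and O(state) is assumed from the rest of the module documentation:

    - name: Static IPv4 ethernet connection with an extra route and explicit DNS
      community.general.nmcli:
        conn_name: lan0
        ifname: eth0                         # hypothetical interface
        type: ethernet
        ip4: 192.0.2.24/24                   # ipv4.method becomes manual automatically
        gw4: 192.0.2.1
        routes4:
          - 192.0.3.0/24 192.0.2.1
        dns4:
          - 192.0.2.53
        state: present                       # assumed option, not shown in this hunk
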
ip6:
description:
- List of IPv6 addresses to this interface.
- - Use the format C(abbe::cafe/128) or C(abbe::cafe).
- - If defined and I(method6) is not specified, automatically set C(ipv6.method) to C(manual).
+ - Use the format V(abbe::cafe/128) or V(abbe::cafe).
+ - If defined and O(method6) is not specified, automatically set C(ipv6.method) to V(manual).
type: list
elements: str
gw6:
description:
- The IPv6 gateway for this interface.
- - Use the format C(2001:db8::1).
+ - Use the format V(2001:db8::1).
type: str
gw6_ignore_auto:
description:
@@ -223,8 +238,8 @@ options:
routes6:
description:
- The list of IPv6 routes.
- - Use the format C(fd12:3456:789a:1::/64 2001:dead:beef::1).
- - To specify more complex routes, use the I(routes6_extended) option.
+ - Use the format V(fd12:3456:789a:1::/64 2001:dead:beef::1).
+ - To specify more complex routes, use the O(routes6_extended) option.
type: list
elements: str
version_added: 4.4.0
@@ -237,12 +252,12 @@ options:
ip:
description:
- IP or prefix of route.
- - Use the format C(fd12:3456:789a:1::/64).
+ - Use the format V(fd12:3456:789a:1::/64).
type: str
required: true
next_hop:
description:
- - Use the format C(2001:dead:beef::1).
+ - Use the format V(2001:dead:beef::1).
type: str
metric:
description:
@@ -273,7 +288,7 @@ options:
dns6:
description:
- A list of up to 3 DNS servers.
- - The entries must be IPv6 addresses, for example C(2001:4860:4860::8888).
+ - The entries must be IPv6 addresses, for example V(2001:4860:4860::8888).
elements: str
type: list
dns6_search:
@@ -281,6 +296,12 @@ options:
- A list of DNS search domains.
elements: str
type: list
+ dns6_options:
+ description:
+ - A list of DNS options.
+ elements: str
+ type: list
+ version_added: 7.2.0
dns6_ignore_auto:
description:
- Ignore automatically configured IPv6 name servers.
@@ -290,8 +311,8 @@ options:
method6:
description:
- Configuration method to be used for IPv6
- - If I(ip6) is set, C(ipv6.method) is automatically set to C(manual) and this parameter is not needed.
- - C(disabled) was added in community.general 3.3.0.
+ - If O(ip6) is set, C(ipv6.method) is automatically set to V(manual) and this parameter is not needed.
+ - V(disabled) was added in community.general 3.3.0.
type: str
choices: [ignore, auto, dhcp, link-local, manual, shared, disabled]
version_added: 2.2.0
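
A matching sketch for the IPv6 options above, with placeholder addressing and the same assumed O(state) option:

    - name: Static IPv6 addressing on the same connection profile
      community.general.nmcli:
        conn_name: lan0
        ifname: eth0                         # hypothetical interface
        type: ethernet
        ip6: 2001:db8::24/64                 # ipv6.method becomes manual automatically
        gw6: 2001:db8::1
        dns6:
          - 2001:4860:4860::8888
        routes6:
          - fd12:3456:789a:1::/64 2001:db8::1
        state: present                       # assumed option, not shown in this hunk
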
@@ -304,7 +325,7 @@ options:
addr_gen_mode6:
description:
- Configure method for creating the address for use with IPv6 Stateless Address Autoconfiguration.
- - C(default) and C(deafult-or-eui64) have been added in community.general 6.5.0.
+ - V(default) and V(default-or-eui64) have been added in community.general 6.5.0.
type: str
choices: [default, default-or-eui64, eui64, stable-privacy]
version_added: 4.2.0
@@ -312,7 +333,7 @@ options:
description:
- The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
- Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, gsm, pppoe, infiniband)
- - This parameter defaults to C(1500) when unset.
+ - This parameter defaults to V(1500) when unset.
type: int
dhcp_client_id:
description:
@@ -325,7 +346,7 @@ options:
miimon:
description:
- This is only used with bond - miimon.
- - This parameter defaults to C(100) when unset.
+ - This parameter defaults to V(100) when unset.
type: int
downdelay:
description:
@@ -397,9 +418,9 @@ options:
description:
- This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
frame was received on.
- - The default value is C(true), but that is being deprecated
- and it will be changed to C(false) in community.general 7.0.0.
+      - The default changed to V(false) in community.general 7.0.0. It used to be V(true) before.
type: bool
+ default: false
runner:
description:
- This is the type of device or network connection that you wish to create for a team.
@@ -417,9 +438,9 @@ options:
runner_fast_rate:
description:
- Option specifies the rate at which our link partner is asked to transmit LACPDU
- packets. If this is C(true) then packets will be sent once per second. Otherwise they
+ packets. If this is V(true) then packets will be sent once per second. Otherwise they
will be sent every 30 seconds.
- - Only allowed for C(lacp) runner.
+ - Only allowed for O(runner=lacp).
type: bool
version_added: 6.5.0
vlanid:
@@ -469,13 +490,13 @@ options:
ip_tunnel_input_key:
description:
- The key used for tunnel input packets.
- - Only used when I(type=gre).
+ - Only used when O(type=gre).
type: str
version_added: 3.6.0
ip_tunnel_output_key:
description:
- The key used for tunnel output packets.
- - Only used when I(type=gre).
+ - Only used when O(type=gre).
type: str
version_added: 3.6.0
zone:
@@ -491,23 +512,25 @@ options:
- 'An up-to-date list of supported attributes can be found here:
U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).'
- 'For instance to use common WPA-PSK auth with a password:
- C({key-mgmt: wpa-psk, psk: my_password}).'
+ V({key-mgmt: wpa-psk, psk: my_password}).'
type: dict
suboptions:
auth-alg:
description:
- - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here.
- - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP.
- - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties
+ - When WEP is used (that is, if O(wifi_sec.key-mgmt) is V(none) or V(ieee8021x)) indicate the 802.11
+ authentication algorithm required by the AP here.
+ - One of V(open) for Open System, V(shared) for Shared Key, or V(leap) for Cisco LEAP.
+ - When using Cisco LEAP (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap))
+ the O(wifi_sec.leap-username) and O(wifi_sec.leap-password) properties
must be specified.
type: str
choices: [ open, shared, leap ]
fils:
description:
- Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection.
- - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3)
+ - One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access point support it) or V(3)
(enable FILS and fail if not supported).
- - When set to C(0) and no global default is set, FILS will be optionally enabled.
+ - When set to V(0) and no global default is set, FILS will be optionally enabled.
type: int
choices: [ 0, 1, 2, 3 ]
default: 0
@@ -522,20 +545,20 @@ options:
key-mgmt:
description:
- Key management used for the connection.
- - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2
- + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only).
+ - One of V(none) (WEP or no password protection), V(ieee8021x) (Dynamic WEP), V(owe) (Opportunistic Wireless Encryption), V(wpa-psk) (WPA2
+ + WPA3 personal), V(sae) (WPA3 personal only), V(wpa-eap) (WPA2 + WPA3 enterprise) or V(wpa-eap-suite-b-192) (WPA3 enterprise only).
- This property must be set for any Wi-Fi connection that uses security.
type: str
choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ]
leap-password-flags:
- description: Flags indicating how to handle the I(leap-password) property.
+ description: Flags indicating how to handle the O(wifi_sec.leap-password) property.
type: list
elements: int
leap-password:
- description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)).
+ description: The login password for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)).
type: str
leap-username:
- description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)).
+ description: The login username for legacy LEAP connections (that is, if O(wifi_sec.key-mgmt=ieee8021x) and O(wifi_sec.auth-alg=leap)).
type: str
pairwise:
description:
@@ -548,68 +571,72 @@ options:
pmf:
description:
- Indicates whether Protected Management Frames (802.11w) must be enabled for the connection.
- - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3)
- (enable PMF and fail if not supported).
- - When set to C(0) and no global default is set, PMF will be optionally enabled.
+ - One of V(0) (use global default value), V(1) (disable PMF), V(2) (enable PMF if the
+ supplicant and the access point support it) or V(3) (enable PMF and fail if not supported).
+ - When set to V(0) and no global default is set, PMF will be optionally enabled.
type: int
choices: [ 0, 1, 2, 3 ]
default: 0
proto:
description:
- List of strings specifying the allowed WPA protocol versions to use.
- - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN).
+ - Each element may be V(wpa) (allow WPA) or V(rsn) (allow WPA2/RSN).
- If not specified, both WPA and RSN connections are allowed.
type: list
elements: str
choices: [ wpa, rsn ]
psk-flags:
- description: Flags indicating how to handle the I(psk) property.
+ description: Flags indicating how to handle the O(wifi_sec.psk) property.
type: list
elements: int
psk:
description:
- Pre-Shared-Key for WPA networks.
- - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the
+ - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is
+ (as specified in the 802.11i standard) hashed to derive the
actual key, or the key in form of 64 hexadecimal character.
- The WPA3-Personal networks use a passphrase of any length for SAE authentication.
type: str
wep-key-flags:
- description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties.
+ description:
+ - Flags indicating how to handle the O(wifi_sec.wep-key0), O(wifi_sec.wep-key1),
+ O(wifi_sec.wep-key2), and O(wifi_sec.wep-key3) properties.
type: list
elements: int
wep-key-type:
description:
- Controls the interpretation of WEP keys.
- - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII
- password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the
+ - Allowed values are V(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII
+ password; or V(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the
actual WEP key.
type: int
choices: [ 1, 2 ]
wep-key0:
description:
- Index 0 WEP key. This is the WEP key used in most networks.
- - See the I(wep-key-type) property for a description of how this key is interpreted.
+ - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
type: str
wep-key1:
description:
- Index 1 WEP key. This WEP index is not used by most networks.
- - See the I(wep-key-type) property for a description of how this key is interpreted.
+ - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
type: str
wep-key2:
description:
- Index 2 WEP key. This WEP index is not used by most networks.
- - See the I(wep-key-type) property for a description of how this key is interpreted.
+ - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
type: str
wep-key3:
description:
- Index 3 WEP key. This WEP index is not used by most networks.
- - See the I(wep-key-type) property for a description of how this key is interpreted.
+ - See the O(wifi_sec.wep-key-type) property for a description of how this key is interpreted.
type: str
wep-tx-keyidx:
description:
- - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here.
- - Valid values are C(0) (default key) through C(3).
- - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4).
+ - When static WEP is used (that is, if O(wifi_sec.key-mgmt=none)) and a non-default WEP key index
+ is used by the AP, put that WEP key index here.
+ - Valid values are V(0) (default key) through V(3).
+ - Note that some consumer access points (like the Linksys WRT54G) number the keys V(1) to V(4).
type: int
choices: [ 0, 1, 2, 3 ]
default: 0
@@ -618,7 +645,7 @@ options:
- Flags indicating which mode of WPS is to be used if any.
- There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS
enrollment from the Access Point capabilities.
- - WPS can be disabled by setting this property to a value of C(1).
+ - WPS can be disabled by setting this property to a value of V(1).
type: int
default: 0
version_added: 3.0.0
@@ -634,34 +661,34 @@ options:
- 'An up-to-date list of supported attributes can be found here:
U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).'
- 'For instance to create a hidden AP mode WiFi connection:
- C({hidden: true, mode: ap}).'
+ V({hidden: true, mode: ap}).'
type: dict
suboptions:
ap-isolation:
description:
- Configures AP isolation, which prevents communication between wireless devices connected to this AP.
- - This property can be set to a value different from C(-1) only when the interface is configured in AP mode.
- - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks
+ - This property can be set to a value different from V(-1) only when the interface is configured in AP mode.
+ - If set to V(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks
from other clients in the network. At the same time, it prevents devices to access resources on the same wireless networks as file
shares, printers, etc.
- - If set to C(0), devices can talk to each other.
- - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0).
+ - If set to V(0), devices can talk to each other.
+ - When set to V(-1), the global default is used; in case the global default is unspecified it is assumed to be V(0).
type: int
choices: [ -1, 0, 1 ]
default: -1
assigned-mac-address:
description:
- The new field for the cloned MAC address.
- - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or
- C(stable).
- - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses.
- - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address).
+ - It can be either a hardware address in ASCII representation, or one of the special values V(preserve), V(permanent), V(random) or
+ V(stable).
+ - This field replaces the deprecated O(wifi.cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses.
+ - Note that this property only exists in D-Bus API. libnm and nmcli continue to call this property C(cloned-mac-address).
type: str
band:
description:
- 802.11 frequency band of the network.
- - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11.
- - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not
+ - One of V(a) for 5GHz 802.11a or V(bg) for 2.4GHz 802.11.
+ - This will lock associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the device will not
associate with the same network in the 2.4GHz band even if the network's settings are compatible.
- This setting depends on specific driver capability and may not work with all drivers.
type: str
@@ -676,38 +703,38 @@ options:
description:
- Wireless channel to use for the Wi-Fi connection.
- The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel.
- - Because channel numbers overlap between bands, this property also requires the I(band) property to be set.
+ - Because channel numbers overlap between bands, this property also requires the O(wifi.band) property to be set.
type: int
default: 0
cloned-mac-address:
description:
- - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like
- C(random).
- - For libnm and nmcli, this field is called I(cloned-mac-address).
+ - This D-Bus field is deprecated in favor of O(wifi.assigned-mac-address) which is more flexible and allows specifying special variants like
+ V(random).
+ - For libnm and nmcli, this field is called C(cloned-mac-address).
type: str
generate-mac-address-mask:
description:
- - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a
+ - With O(wifi.cloned-mac-address) setting V(random) or V(stable), by default all bits of the MAC address are scrambled and a
locally-administered, unicast MAC address is created. This property allows to specify that certain bits are fixed.
- Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address.
- - If the property is C(null), it is eligible to be overwritten by a default connection setting.
- - If the value is still c(null) or an empty string, the default is to create a locally-administered, unicast MAC address.
+ - If the property is V(null), it is eligible to be overwritten by a default connection setting.
+ - If the value is still V(null) or an empty string, the default is to create a locally-administered, unicast MAC address.
- If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC
address of the device, while the unset bits are subject to randomization.
- - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the
- C(random) or C(stable) algorithm.
+ - Setting V(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the
+ V(random) or V(stable) algorithm.
- If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits
that shall not be randomized.
- - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are
+ - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are
randomized.
- - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address.
+ - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address.
- If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example,
- C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally
+ V(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally
administered.
type: str
hidden:
description:
- - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode.
+ - If V(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode.
- In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID.
However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with
caution.
@@ -719,14 +746,14 @@ options:
mac-address-blacklist:
description:
- A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply.
- - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)).
+ - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, V(00:11:22:33:44:55)).
type: list
elements: str
mac-address-randomization:
description:
- - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1)
- (never randomize the MAC address), or C(2) (always randomize the MAC address).
- - This property is deprecated for I(cloned-mac-address).
+ - One of V(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), V(1)
+ (never randomize the MAC address), or V(2) (always randomize the MAC address).
+ - This property is deprecated for O(wifi.cloned-mac-address).
type: int
default: 0
choices: [ 0, 1, 2 ]
@@ -736,7 +763,7 @@ options:
- This property does not change the MAC address of the device (for example for MAC spoofing).
type: str
mode:
- description: Wi-Fi network mode. If blank, C(infrastructure) is assumed.
+ description: Wi-Fi network mode. If blank, V(infrastructure) is assumed.
type: str
choices: [ infrastructure, mesh, adhoc, ap ]
default: infrastructure
@@ -746,7 +773,7 @@ options:
default: 0
powersave:
description:
- - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (don't touch currently configure setting) or C(0) (use
+ - One of V(2) (disable Wi-Fi power saving), V(3) (enable Wi-Fi power saving), V(1) (don't touch the currently configured setting) or V(0) (use
the globally configured value).
- All other values are reserved.
type: int
@@ -755,7 +782,7 @@ options:
rate:
description:
- If non-zero, directs the device to only use the specified bitrate for communication with the access point.
- - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s.
+ - Units are in Kb/s, so for example V(5500) = 5.5 Mbit/s.
- This property is highly driver dependent and not all devices support setting a static bitrate.
type: int
default: 0
@@ -769,11 +796,11 @@ options:
wake-on-wlan:
description:
- The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options.
- - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)),
- C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)),
- C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)),
- C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values
- C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager).
+ - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (V(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (V(0x4)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (V(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (V(0x10)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (V(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (V(0x40)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (V(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (V(0x100)) or the special values
+ V(0x1) (to use global settings) and V(0x8000) (to disable management of Wake-on-LAN in NetworkManager).
- Note the option values' sum must be specified in order to combine multiple options.
type: int
default: 1
@@ -781,7 +808,7 @@ options:
ignore_unsupported_suboptions:
description:
- Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host.
- - Only I(wifi) and I(wifi_sec) options are currently affected.
+ - Only O(wifi) and O(wifi_sec) options are currently affected.
type: bool
default: false
version_added: 3.6.0
@@ -792,7 +819,7 @@ options:
- 'An up-to-date list of supported attributes can be found here:
U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).'
- 'For instance to use apn, pin, username and password:
- C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).'
+ V({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).'
type: dict
version_added: 3.7.0
suboptions:
@@ -804,18 +831,18 @@ options:
- The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9.
type: str
auto-config:
- description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network
+ description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) will default to values that match the network
the modem will register to in the Mobile Broadband Provider database.
type: bool
default: false
device-id:
description:
- - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to.
+ - The device unique identifier (as given by the V(WWAN) management service) which this connection applies to.
- If given, the connection will only apply to the specified device.
type: str
home-only:
description:
- - When C(true), only connections to the home network will be allowed.
+ - When V(true), only connections to the home network will be allowed.
- Connections to roaming networks will not be made.
type: bool
default: false
@@ -840,13 +867,13 @@ options:
type: str
password-flags:
description:
- - NMSettingSecretFlags indicating how to handle the I(password) property.
+ - NMSettingSecretFlags indicating how to handle the O(gsm.password) property.
- 'Following choices are allowed:
- C(0) B(NONE): The system is responsible for providing and storing this secret (default),
- C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
+ V(0) B(NONE): The system is responsible for providing and storing this secret (default),
+ V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
asked to retrieve it
- C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed
- C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
+ V(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed
+ V(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
(some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
type: int
choices: [ 0, 1, 2 , 4 ]
@@ -858,21 +885,21 @@ options:
type: str
pin-flags:
description:
- - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property.
- - See I(gsm.password-flags) for NMSettingSecretFlags choices.
+ - NMSettingSecretFlags indicating how to handle the O(gsm.pin) property.
+ - See O(gsm.password-flags) for NMSettingSecretFlags choices.
type: int
choices: [ 0, 1, 2 , 4 ]
default: 0
sim-id:
description:
- The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
- - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching
+ - 'If given, the connection will apply to any device also allowed by O(gsm.device-id) which contains a SIM card matching
the given identifier.'
type: str
sim-operator-id:
description:
- - A MCC/MNC string like C(310260) or C(21601I) identifying the specific mobile network operator which this connection applies to.
- - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card
+ - A MCC/MNC string like V(310260) or V(21601I) identifying the specific mobile network operator which this connection applies to.
+ - 'If given, the connection will apply to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains a SIM card
provisioned by the given operator.'
type: str
username:
@@ -892,8 +919,8 @@ options:
mode:
description:
- The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device.
- - 'Following choices are allowed: C(1) B(vepa), C(2) B(bridge), C(3) B(private), C(4) B(passthru)
- and C(5) B(source)'
+ - 'Following choices are allowed: V(1) B(vepa), V(2) B(bridge), V(3) B(private), V(4) B(passthru)
+ and V(5) B(source)'
type: int
choices: [ 1, 2, 3, 4, 5 ]
required: true
@@ -919,7 +946,7 @@ options:
- 'An up-to-date list of supported attributes can be found here:
U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).'
- 'For instance to configure a listen port:
- C({listen-port: 12345}).'
+ V({listen-port: 12345}).'
type: dict
version_added: 4.3.0
suboptions:
@@ -927,19 +954,19 @@ options:
description:
- The 32-bit fwmark for outgoing packets.
- The use of fwmark is optional and is by default off. Setting it to 0 disables it.
- - Note that I(wireguard.ip4-auto-default-route) or I(wireguard.ip6-auto-default-route) enabled, implies to automatically choose a fwmark.
+ - Note that enabling O(wireguard.ip4-auto-default-route) or O(wireguard.ip6-auto-default-route) implies automatically choosing a fwmark.
type: int
ip4-auto-default-route:
description:
- Whether to enable special handling of the IPv4 default route.
- - If enabled, the IPv4 default route from I(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy
+ - If enabled, the IPv4 default route from O(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy
routing rules will be added.
- The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table is chosen
automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved Rule-based Routing"
type: bool
ip6-auto-default-route:
description:
- - Like I(wireguard.ip4-auto-default-route), but for the IPv6 default route.
+ - Like O(wireguard.ip4-auto-default-route), but for the IPv6 default route.
type: bool
listen-port:
description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the
@@ -954,18 +981,18 @@ options:
peer-routes:
description:
- Whether to automatically add routes for the AllowedIPs ranges of the peers.
- - If C(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and
+ - If V(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and
C(ipv6.route-table). Usually you want this automatism enabled.
- - If C(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes)
+ - If V(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes)
and C(ipv6.routes), respectively.
- - Note that if the peer's AllowedIPs is C(0.0.0.0/0) or C(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
+ - Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
setting is enabled, the peer route for this peer won't be added automatically.
type: bool
private-key:
description: The 256 bit private-key in base64 encoding.
type: str
private-key-flags:
- description: C(NMSettingSecretFlags) indicating how to handle the I(wireguard.private-key) property.
+ description: C(NMSettingSecretFlags) indicating how to handle the O(wireguard.private-key) property.
type: int
choices: [ 0, 1, 2 ]
vpn:
@@ -985,19 +1012,19 @@ options:
type: str
required: true
gateway:
- description: The gateway to connection. It can be an IP address (for example C(192.0.2.1))
- or a FQDN address (for example C(vpn.example.com)).
+ description: The gateway of the connection. It can be an IP address (for example V(192.0.2.1))
+ or an FQDN (for example V(vpn.example.com)).
type: str
required: true
password-flags:
description:
- - NMSettingSecretFlags indicating how to handle the I(password) property.
+ - NMSettingSecretFlags indicating how to handle the C(vpn.password) property.
- 'Following choices are allowed:
- C(0) B(NONE): The system is responsible for providing and storing this secret (default);
- C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
+ V(0) B(NONE): The system is responsible for providing and storing this secret (default);
+ V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
asked to retrieve it;
- C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed;
- C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
+ V(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed;
+ V(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
(some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
type: int
choices: [ 0, 1, 2 , 4 ]
@@ -1009,14 +1036,14 @@ options:
ipsec-enabled:
description:
- Enable or disable IPSec tunnel to L2TP host.
- - This option is need when C(service-type) is C(org.freedesktop.NetworkManager.l2tp).
+ - This option is needed when O(vpn.service-type) is V(org.freedesktop.NetworkManager.l2tp).
type: bool
ipsec-psk:
description:
- The pre-shared key in base64 encoding.
- >
- You can encode using this Ansible jinja2 expression: C("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
- - This is only used when I(ipsec-enabled=true).
+ You can encode using this Ansible jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
+ - This is only used when O(vpn.ipsec-enabled=true).
type: str
'''
@@ -1429,6 +1456,55 @@ EXAMPLES = r'''
autoconnect: false
state: present
+## Creating bond attached to bridge example
+- name: Create bond attached to bridge
+ community.general.nmcli:
+ type: bond
+ conn_name: bond0
+ slave_type: bridge
+ master: br0
+ state: present
+
+- name: Create master bridge
+ community.general.nmcli:
+ type: bridge
+ conn_name: br0
+ method4: disabled
+ method6: disabled
+ state: present
+
+## Creating vlan connection attached to bridge
+- name: Create master bridge
+ community.general.nmcli:
+ type: bridge
+ conn_name: br0
+ state: present
+
+- name: Create VLAN 5
+ community.general.nmcli:
+ type: vlan
+ conn_name: eth0.5
+ slave_type: bridge
+ master: br0
+ vlandev: eth0
+ vlanid: 5
+ state: present
+
+## Defining IP rules while setting a static IP
+## The routing table 'production' is assumed to have ID 200 in this example.
+- name: Set Static ips for interface with ip rules and routes
+ community.general.nmcli:
+ type: ethernet
+ conn_name: 'eth0'
+ ip4: '192.168.1.50'
+ gw4: '192.168.1.1'
+ state: present
+ routes4_extended:
+ - ip: "0.0.0.0/0"
+ next_hop: "192.168.1.1"
+ table: "production"
+ routing_rules4:
+ - "priority 0 from 192.168.1.50 table 200"
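+
+## Illustrative sketch: the new slave_type option can also attach a plain ethernet
+## connection to an existing bridge (assumes an eth1 device and the br0 bridge above).
+- name: Attach ethernet eth1 to bridge br0
+ community.general.nmcli:
+ type: ethernet
+ conn_name: eth1
+ slave_type: bridge
+ master: br0
+ state: present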
'''
RETURN = r"""#
@@ -1475,6 +1551,7 @@ class Nmcli(object):
self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions']
self.autoconnect = module.params['autoconnect']
self.conn_name = module.params['conn_name']
+ self.slave_type = module.params['slave_type']
self.master = module.params['master']
self.ifname = module.params['ifname']
self.type = module.params['type']
@@ -1488,6 +1565,7 @@ class Nmcli(object):
self.never_default4 = module.params['never_default4']
self.dns4 = module.params['dns4']
self.dns4_search = module.params['dns4_search']
+ self.dns4_options = module.params['dns4_options']
self.dns4_ignore_auto = module.params['dns4_ignore_auto']
self.method4 = module.params['method4']
self.may_fail4 = module.params['may_fail4']
@@ -1499,6 +1577,7 @@ class Nmcli(object):
self.route_metric6 = module.params['route_metric6']
self.dns6 = module.params['dns6']
self.dns6_search = module.params['dns6_search']
+ self.dns6_options = module.params['dns6_options']
self.dns6_ignore_auto = module.params['dns6_ignore_auto']
self.method6 = module.params['method6']
self.ip_privacy6 = module.params['ip_privacy6']
@@ -1519,8 +1598,7 @@ class Nmcli(object):
self.hellotime = module.params['hellotime']
self.maxage = module.params['maxage']
self.ageingtime = module.params['ageingtime']
- # hairpin should be back to normal in 7.0.0
- self._hairpin = module.params['hairpin']
+ self.hairpin = module.params['hairpin']
self.path_cost = module.params['path_cost']
self.mac = module.params['mac']
self.runner = module.params['runner']
@@ -1571,17 +1649,13 @@ class Nmcli(object):
self.edit_commands = []
- @property
- def hairpin(self):
- if self._hairpin is None:
- self.module.deprecate(
- "Parameter 'hairpin' default value will change from true to false in community.general 7.0.0. "
- "Set the value explicitly to suppress this warning.",
- version='7.0.0', collection_name='community.general',
- )
- # Should be False in 7.0.0 but then that should be in argument_specs
- self._hairpin = True
- return self._hairpin
+ self.extra_options_validation()
+
+ def extra_options_validation(self):
+ """ Additional validation of options set passed to module that cannot be implemented in module's argspecs. """
+ if self.type not in ("bridge-slave", "team-slave", "bond-slave"):
+ if self.master is None and self.slave_type is not None:
+ self.module.fail_json(msg="'master' option is required when 'slave_type' is specified.")
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
if isinstance(cmd, list):
@@ -1610,6 +1684,7 @@ class Nmcli(object):
'ipv4.dhcp-client-id': self.dhcp_client_id,
'ipv4.dns': self.dns4,
'ipv4.dns-search': self.dns4_search,
+ 'ipv4.dns-options': self.dns4_options,
'ipv4.ignore-auto-dns': self.dns4_ignore_auto,
'ipv4.gateway': self.gw4,
'ipv4.ignore-auto-routes': self.gw4_ignore_auto,
@@ -1622,6 +1697,7 @@ class Nmcli(object):
'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6),
'ipv6.dns': self.dns6,
'ipv6.dns-search': self.dns6_search,
+ 'ipv6.dns-options': self.dns6_options,
'ipv6.ignore-auto-dns': self.dns6_ignore_auto,
'ipv6.gateway': self.gw6,
'ipv6.ignore-auto-routes': self.gw6_ignore_auto,
@@ -1647,6 +1723,7 @@ class Nmcli(object):
if self.slave_conn_type:
options.update({
'connection.master': self.master,
+ 'connection.slave-type': self.slave_type,
})
# Options specific to a connection type.
@@ -1662,9 +1739,17 @@ class Nmcli(object):
'xmit_hash_policy': self.xmit_hash_policy,
})
elif self.type == 'bond-slave':
- options.update({
- 'connection.slave-type': 'bond',
- })
+ if self.slave_type and self.slave_type != 'bond':
+ self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
+ "Allowed slave-type for '%s' is 'bond'."
+ % (self.type, self.slave_type, self.type)
+ )
+ if not self.slave_type:
+ self.module.warn("Connection 'slave-type' property automatically set to 'bond' "
+ "because of using 'bond-slave' connection type.")
+ options.update({
+ 'connection.slave-type': 'bond',
+ })
elif self.type == 'bridge':
options.update({
'bridge.ageing-time': self.ageingtime,
@@ -1674,7 +1759,7 @@ class Nmcli(object):
'bridge.priority': self.priority,
'bridge.stp': self.stp,
})
- # priority make sense when stp enabed, otherwise nmcli keeps bridge-priority to 32768 regrdless of input.
+ # priority only makes sense when stp is enabled, otherwise nmcli keeps bridge-priority at 32768 regardless of input.
# force ignoring to save idempotency
if self.stp:
options.update({'bridge.priority': self.priority})
@@ -1688,16 +1773,36 @@ class Nmcli(object):
'team.runner-fast-rate': self.runner_fast_rate,
})
elif self.type == 'bridge-slave':
+ if self.slave_type and self.slave_type != 'bridge':
+ self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
+ "Allowed slave-type for '%s' is 'bridge'."
+ % (self.type, self.slave_type, self.type)
+ )
+ if not self.slave_type:
+ self.module.warn("Connection 'slave-type' property automatically set to 'bridge' "
+ "because of using 'bridge-slave' connection type.")
+ options.update({'connection.slave-type': 'bridge'})
+ self.module.warn(
+ "Connection type as 'bridge-slave' implies 'ethernet' connection with 'bridge' slave-type. "
+ "Consider using slave_type='bridge' with necessary type."
+ )
options.update({
- 'connection.slave-type': 'bridge',
'bridge-port.path-cost': self.path_cost,
'bridge-port.hairpin-mode': self.hairpin,
'bridge-port.priority': self.slavepriority,
})
elif self.type == 'team-slave':
- options.update({
- 'connection.slave-type': 'team',
- })
+ if self.slave_type and self.slave_type != 'team':
+ self.module.fail_json(msg="Connection type '%s' cannot be combined with '%s' slave-type. "
+ "Allowed slave-type for '%s' is 'team'."
+ % (self.type, self.slave_type, self.type)
+ )
+ if not self.slave_type:
+ self.module.warn("Connection 'slave-type' property automatically set to 'team' "
+ "because of using 'team-slave' connection type.")
+ options.update({
+ 'connection.slave-type': 'team',
+ })
elif self.tunnel_conn_type:
options.update({
'ip-tunnel.local': self.ip_tunnel_local,
@@ -1727,7 +1832,7 @@ class Nmcli(object):
elif self.type == 'wifi':
options.update({
'802-11-wireless.ssid': self.ssid,
- 'connection.slave-type': 'bond' if self.master else None,
+ 'connection.slave-type': ('bond' if self.slave_type is None else self.slave_type) if self.master else None,
})
if self.wifi:
for name, value in self.wifi.items():
@@ -1833,6 +1938,7 @@ class Nmcli(object):
'macvlan',
'wireguard',
'vpn',
+ 'loopback',
)
@property
@@ -1845,15 +1951,21 @@ class Nmcli(object):
@property
def mtu_conn_type(self):
return self.type in (
+ 'bond',
+ 'bond-slave',
'dummy',
'ethernet',
+ 'infiniband',
'team-slave',
'vlan',
)
@property
def mtu_setting(self):
- return '802-3-ethernet.mtu'
+ if self.type == 'infiniband':
+ return 'infiniband.mtu'
+ else:
+ return '802-3-ethernet.mtu'
@staticmethod
def mtu_to_string(mtu):
@@ -1882,10 +1994,17 @@ class Nmcli(object):
@property
def slave_conn_type(self):
return self.type in (
+ 'ethernet',
+ 'bridge',
+ 'bond',
+ 'vlan',
+ 'team',
+ 'wifi',
'bond-slave',
'bridge-slave',
'team-slave',
'wifi',
+ 'infiniband',
)
@property
@@ -1963,10 +2082,12 @@ class Nmcli(object):
'ipv6.addresses',
'ipv4.dns',
'ipv4.dns-search',
+ 'ipv4.dns-options',
'ipv4.routes',
'ipv4.routing-rules',
'ipv6.dns',
'ipv6.dns-search',
+ 'ipv6.dns-options',
'ipv6.routes',
'802-11-wireless-security.group',
'802-11-wireless-security.leap-password-flags',
@@ -2104,7 +2225,10 @@ class Nmcli(object):
if key and len(pair) > 1:
raw_value = pair[1].lstrip()
if raw_value == '--':
- conn_info[key] = None
+ if key_type == list:
+ conn_info[key] = []
+ else:
+ conn_info[key] = None
elif key == 'bond.options':
# Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax.
opts = raw_value.split(',')
@@ -2191,7 +2315,7 @@ class Nmcli(object):
# We can't just do `if not value` because then if there's a value
# of 0 specified as an integer it'll be interpreted as empty when
# it actually isn't.
- if value != 0 and not value:
+ if value not in (0, []) and not value:
continue
if key in conn_info:
@@ -2276,6 +2400,7 @@ def main():
state=dict(type='str', required=True, choices=['absent', 'present']),
conn_name=dict(type='str', required=True),
master=dict(type='str'),
+ slave_type=dict(type='str', choices=['bond', 'bridge', 'team']),
ifname=dict(type='str'),
type=dict(type='str',
choices=[
@@ -2299,6 +2424,7 @@ def main():
'macvlan',
'wireguard',
'vpn',
+ 'loopback',
]),
ip4=dict(type='list', elements='str'),
gw4=dict(type='str'),
@@ -2321,6 +2447,7 @@ def main():
never_default4=dict(type='bool', default=False),
dns4=dict(type='list', elements='str'),
dns4_search=dict(type='list', elements='str'),
+ dns4_options=dict(type='list', elements='str'),
dns4_ignore_auto=dict(type='bool', default=False),
method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']),
may_fail4=dict(type='bool', default=True),
@@ -2330,6 +2457,7 @@ def main():
gw6_ignore_auto=dict(type='bool', default=False),
dns6=dict(type='list', elements='str'),
dns6_search=dict(type='list', elements='str'),
+ dns6_options=dict(type='list', elements='str'),
dns6_ignore_auto=dict(type='bool', default=False),
routes6=dict(type='list', elements='str'),
routes6_extended=dict(type='list',
@@ -2369,7 +2497,7 @@ def main():
hellotime=dict(type='int', default=2),
maxage=dict(type='int', default=20),
ageingtime=dict(type='int', default=300),
- hairpin=dict(type='bool'),
+ hairpin=dict(type='bool', default=False),
path_cost=dict(type='int', default=100),
# team specific vars
runner=dict(type='str', default='roundrobin',
@@ -2432,7 +2560,7 @@ def main():
if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp":
nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp")
# team-slave checks
- if nmcli.type == 'team-slave':
+ if nmcli.type == 'team-slave' or nmcli.slave_type == 'team':
if nmcli.master is None:
nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
if nmcli.ifname is None:
diff --git a/ansible_collections/community/general/plugins/modules/nomad_job.py b/ansible_collections/community/general/plugins/modules/nomad_job.py
index ca76536b4..87e8ec04c 100644
--- a/ansible_collections/community/general/plugins/modules/nomad_job.py
+++ b/ansible_collections/community/general/plugins/modules/nomad_job.py
@@ -33,7 +33,7 @@ options:
description:
- Name of job for delete, stop and start job without source.
- Name of job for delete, stop and start job without source.
- - Either this or I(content) must be specified.
+ - Either this or O(content) must be specified.
type: str
state:
description:
@@ -49,7 +49,7 @@ options:
content:
description:
- Content of Nomad job.
- - Either this or I(name) must be specified.
+ - Either this or O(name) must be specified.
type: str
content_format:
description:
@@ -71,6 +71,14 @@ EXAMPLES = '''
content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
timeout: 120
+- name: Connect to a non-default port to create job
+ community.general.nomad_job:
+ host: localhost
+ port: 4645
+ state: present
+ content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+ timeout: 120
+
- name: Stop job
community.general.nomad_job:
host: localhost
@@ -103,6 +111,7 @@ def run():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True, type='str'),
+ port=dict(type='int', default=4646),
state=dict(required=True, choices=['present', 'absent']),
use_ssl=dict(type='bool', default=True),
timeout=dict(type='int', default=5),
@@ -132,6 +141,7 @@ def run():
nomad_client = nomad.Nomad(
host=module.params.get('host'),
+ port=module.params.get('port'),
secure=module.params.get('use_ssl'),
timeout=module.params.get('timeout'),
verify=module.params.get('validate_certs'),
diff --git a/ansible_collections/community/general/plugins/modules/nomad_job_info.py b/ansible_collections/community/general/plugins/modules/nomad_job_info.py
index 5ee25a57a..bd7cf8ca9 100644
--- a/ansible_collections/community/general/plugins/modules/nomad_job_info.py
+++ b/ansible_collections/community/general/plugins/modules/nomad_job_info.py
@@ -29,8 +29,6 @@ options:
- Name of job for Get info.
- If not specified, lists all jobs.
type: str
-notes:
- - C(check_mode) is supported.
seealso:
- name: Nomad jobs documentation
description: Complete documentation for Nomad API jobs.
@@ -283,6 +281,7 @@ def run():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True, type='str'),
+ port=dict(type='int', default=4646),
use_ssl=dict(type='bool', default=True),
timeout=dict(type='int', default=5),
validate_certs=dict(type='bool', default=True),
@@ -302,6 +301,7 @@ def run():
nomad_client = nomad.Nomad(
host=module.params.get('host'),
+ port=module.params.get('port'),
secure=module.params.get('use_ssl'),
timeout=module.params.get('timeout'),
verify=module.params.get('validate_certs'),
diff --git a/ansible_collections/community/general/plugins/modules/nomad_token.py b/ansible_collections/community/general/plugins/modules/nomad_token.py
new file mode 100644
index 000000000..51a2f9716
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nomad_token.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Pedro Nascimento <apecnascimento@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_token
+author: Pedro Nascimento (@apecnascimento)
+version_added: "8.1.0"
+short_description: Manage Nomad ACL tokens
+description:
+ - This module allows creating bootstrap tokens, and creating, updating, and deleting ACL tokens.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of ACL token to create.
+ type: str
+ token_type:
+ description:
+ - The type of the token can be V(client), V(management), or V(bootstrap).
+ choices: ["client", "management", "bootstrap"]
+ type: str
+ default: "client"
+ policies:
+ description:
+ - A list of the policies assigned to the token.
+ type: list
+ elements: str
+ default: []
+ global_replicated:
+ description:
+ - Indicates whether or not the token was created with the C(--global) flag.
+ type: bool
+ default: false
+ state:
+ description:
+ - Create or remove ACL token.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+
+seealso:
+ - name: Nomad ACL documentation
+ description: Complete documentation for Nomad API ACL.
+ link: https://developer.hashicorp.com/nomad/api-docs/acl/tokens
+'''
+
+EXAMPLES = '''
+- name: Create bootstrap token
+ community.general.nomad_token:
+ host: localhost
+ token_type: bootstrap
+ state: present
+
+- name: Create ACL token
+ community.general.nomad_token:
+ host: localhost
+ name: "Dev token"
+ token_type: client
+ policies:
+ - readonly
+ global_replicated: false
+ state: present
+
+- name: Update ACL token Dev token
+ community.general.nomad_token:
+ host: localhost
+ name: "Dev token"
+ token_type: client
+ policies:
+ - readonly
+ - devpolicy
+ global_replicated: false
+ state: present
+
+- name: Delete ACL token
+ community.general.nomad_token:
+ host: localhost
+ name: "Dev token"
+ state: absent
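+
+# Illustrative sketch: connection options such as port can be combined with
+# token management (assumes a Nomad API listening on the non-default port 4645).
+- name: Create ACL token on a non-default port
+ community.general.nomad_token:
+ host: localhost
+ port: 4645
+ name: "CI token"
+ token_type: client
+ policies:
+ - readonly
+ state: present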
+'''
+
+RETURN = '''
+result:
+ description: Result returned by nomad.
+ returned: always
+ type: dict
+ sample: {
+ "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e",
+ "create_index": 14,
+ "create_time": "2023-11-12T18:48:34.248857001Z",
+ "expiration_time": null,
+ "expiration_ttl": "",
+ "global": true,
+ "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=",
+ "modify_index": 836,
+ "name": "devs",
+ "policies": [
+ "readonly"
+ ],
+ "roles": null,
+ "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea",
+ "type": "client"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+import_nomad = None
+
+try:
+ import nomad
+
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def get_token(name, nomad_client):
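+ """Return the ACL token whose Name matches, or None if no such token exists."""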
+ tokens = nomad_client.acl.get_tokens()
+ token = next((token for token in tokens
+ if token.get('Name') == name), None)
+ return token
+
+
+def transform_response(nomad_response):
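+ """Map Nomad's CamelCase token fields to the snake_case keys returned by this module."""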
+ transformed_response = {
+ "accessor_id": nomad_response['AccessorID'],
+ "create_index": nomad_response['CreateIndex'],
+ "create_time": nomad_response['CreateTime'],
+ "expiration_ttl": nomad_response['ExpirationTTL'],
+ "expiration_time": nomad_response['ExpirationTime'],
+ "global": nomad_response['Global'],
+ "hash": nomad_response['Hash'],
+ "modify_index": nomad_response['ModifyIndex'],
+ "name": nomad_response['Name'],
+ "policies": nomad_response['Policies'],
+ "roles": nomad_response['Roles'],
+ "secret_id": nomad_response['SecretID'],
+ "type": nomad_response['Type']
+ }
+
+ return transformed_response
+
+
+argument_spec = dict(
+ host=dict(required=True, type='str'),
+ port=dict(type='int', default=4646),
+ state=dict(required=True, choices=['present', 'absent']),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ namespace=dict(type='str'),
+ token=dict(type='str', no_log=True),
+ name=dict(type='str'),
+ token_type=dict(choices=['client', 'management', 'bootstrap'], default='client'),
+ policies=dict(type='list', elements='str', default=[]),
+ global_replicated=dict(type='bool', default=False),
+)
+
+
+def setup_module_object():
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ required_one_of=[
+ ['name', 'token_type']
+ ],
+ required_if=[
+ ('token_type', 'client', ('name',)),
+ ('token_type', 'management', ('name',)),
+ ],
+ )
+ return module
+
+
+def setup_nomad_client(module):
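+ """Build a python-nomad client from the module's connection parameters, failing early if the library is missing."""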
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ port=module.params.get('port'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ return nomad_client
+
+
+def run(module):
+ nomad_client = setup_nomad_client(module)
+
+ msg = ""
+ result = {}
+ changed = False
+ if module.params.get('state') == "present":
+
+ if module.params.get('token_type') == 'bootstrap':
+ try:
+ current_token = get_token('Bootstrap Token', nomad_client)
+ if current_token:
+ msg = "ACL bootstrap token already exists."
+ else:
+ nomad_result = nomad_client.acl.generate_bootstrap()
+ msg = "Bootstrap token created."
+ result = transform_response(nomad_result)
+ changed = True
+
+ except nomad.api.exceptions.URLNotAuthorizedNomadException:
+ try:
+ nomad_result = nomad_client.acl.generate_bootstrap()
+ msg = "Bootstrap token created."
+ result = transform_response(nomad_result)
+ changed = True
+
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+ else:
+ try:
+ token_info = {
+ "Name": module.params.get('name'),
+ "Type": module.params.get('token_type'),
+ "Policies": module.params.get('policies'),
+ "Global": module.params.get('global_replicated')
+ }
+
+ current_token = get_token(token_info['Name'], nomad_client)
+
+ if current_token:
+ token_info['AccessorID'] = current_token['AccessorID']
+ nomad_result = nomad_client.acl.update_token(current_token['AccessorID'], token_info)
+ msg = "ACL token updated."
+ result = transform_response(nomad_result)
+ changed = True
+
+ else:
+ nomad_result = nomad_client.acl.create_token(token_info)
+ msg = "ACL token created."
+ result = transform_response(nomad_result)
+ changed = True
+
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('state') == "absent":
+
+ if not module.params.get('name'):
+ module.fail_json(msg="name is needed to delete token.")
+
+ if module.params.get('token_type') == 'bootstrap' or module.params.get('name') == 'Bootstrap Token':
+ module.fail_json(msg="Deleting the ACL bootstrap token is not allowed.")
+
+ try:
+ token = get_token(module.params.get('name'), nomad_client)
+ if token:
+ nomad_client.acl.delete_token(token.get('AccessorID'))
+ msg = 'ACL token deleted.'
+ changed = True
+ else:
+ msg = "No token with name '{0}' found".format(module.params.get('name'))
+
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, msg=msg, result=result)
+
+
+def main():
+ module = setup_module_object()
+ run(module)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nosh.py b/ansible_collections/community/general/plugins/modules/nosh.py
index 2dfb8d590..0e03142d8 100644
--- a/ansible_collections/community/general/plugins/modules/nosh.py
+++ b/ansible_collections/community/general/plugins/modules/nosh.py
@@ -36,26 +36,26 @@ options:
required: false
choices: [ started, stopped, reset, restarted, reloaded ]
description:
- - C(started)/C(stopped) are idempotent actions that will not run
+ - V(started)/V(stopped) are idempotent actions that will not run
commands unless necessary.
- C(restarted) will always bounce the service.
- C(reloaded) will send a SIGHUP or start the service.
- C(reset) will start or stop the service according to whether it is
+ V(restarted) will always bounce the service.
+ V(reloaded) will send a SIGHUP or start the service.
+ V(reset) will start or stop the service according to whether it is
enabled or not.
enabled:
required: false
type: bool
description:
- Enable or disable the service, independently of C(*.preset) file
- preference or running state. Mutually exclusive with I(preset). Will take
- effect prior to I(state=reset).
+ preference or running state. Mutually exclusive with O(preset). Will take
+ effect prior to O(state=reset).
preset:
required: false
type: bool
description:
- Enable or disable the service according to local preferences in C(*.preset) files.
- Mutually exclusive with I(enabled). Only has an effect if set to true. Will take
- effect prior to I(state=reset).
+ Mutually exclusive with O(enabled). Only has an effect if set to true. Will take
+ effect prior to O(state=reset).
user:
required: false
default: false
@@ -146,12 +146,12 @@ preset:
type: bool
sample: 'False'
state:
- description: service process run state, C(None) if the service is not loaded and will not be started
+ description: service process run state, V(none) if the service is not loaded and will not be started
returned: if state option is used
type: str
sample: "reloaded"
status:
- description: A dictionary with the key=value pairs returned by C(system-control show-json) or C(None) if the service is not loaded
+ description: A dictionary with the key=value pairs returned by C(system-control show-json) or V(none) if the service is not loaded
returned: success
type: complex
contains:
diff --git a/ansible_collections/community/general/plugins/modules/npm.py b/ansible_collections/community/general/plugins/modules/npm.py
index 013fd6e57..e6dc0b772 100644
--- a/ansible_collections/community/general/plugins/modules/npm.py
+++ b/ansible_collections/community/general/plugins/modules/npm.py
@@ -150,6 +150,7 @@ import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
class Npm(object):
@@ -172,33 +173,29 @@ class Npm(object):
else:
self.executable = [module.get_bin_path('npm', True)]
- if kwargs['version'] and self.state != 'absent':
- self.name_version = self.name + '@' + str(self.version)
+ if kwargs['version'] and kwargs['state'] != 'absent':
+ self.name_version = self.name + '@' + str(kwargs['version'])
else:
self.name_version = self.name
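+ # Declare how each module option maps to an npm CLI flag; CmdRunner assembles the final command line in _exec().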
+ self.runner = CmdRunner(
+ module,
+ command=self.executable,
+ arg_formats=dict(
+ exec_args=cmd_runner_fmt.as_list(),
+ global_=cmd_runner_fmt.as_bool('--global'),
+ production=cmd_runner_fmt.as_bool('--production'),
+ ignore_scripts=cmd_runner_fmt.as_bool('--ignore-scripts'),
+ unsafe_perm=cmd_runner_fmt.as_bool('--unsafe-perm'),
+ name_version=cmd_runner_fmt.as_list(),
+ registry=cmd_runner_fmt.as_opt_val('--registry'),
+ no_optional=cmd_runner_fmt.as_bool('--no-optional'),
+ no_bin_links=cmd_runner_fmt.as_bool('--no-bin-links'),
+ )
+ )
+
def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
- cmd = self.executable + args
-
- if self.glbl:
- cmd.append('--global')
- if self.production and ('install' in cmd or 'update' in cmd or 'ci' in cmd):
- cmd.append('--production')
- if self.ignore_scripts:
- cmd.append('--ignore-scripts')
- if self.unsafe_perm:
- cmd.append('--unsafe-perm')
- if self.name_version and add_package_name:
- cmd.append(self.name_version)
- if self.registry:
- cmd.append('--registry')
- cmd.append(self.registry)
- if self.no_optional:
- cmd.append('--no-optional')
- if self.no_bin_links:
- cmd.append('--no-bin-links')
-
# If path is specified, cd into that path and run the command.
cwd = None
if self.path:
@@ -208,8 +205,19 @@ class Npm(object):
self.module.fail_json(msg="path %s is not a directory" % self.path)
cwd = self.path
- rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ params = dict(self.module.params)
+ params['exec_args'] = args
+ params['global_'] = self.glbl
+ params['production'] = self.production and ('install' in args or 'update' in args or 'ci' in args)
+ params['name_version'] = self.name_version if add_package_name else None
+
+ with self.runner(
+ "exec_args global_ production ignore_scripts unsafe_perm name_version registry no_optional no_bin_links",
+ check_rc=check_rc, cwd=cwd
+ ) as ctx:
+ rc, out, err = ctx.run(**params)
return out
+
return ''
def list(self):
@@ -269,12 +277,12 @@ class Npm(object):
def main():
arg_spec = dict(
- name=dict(default=None, type='str'),
- path=dict(default=None, type='path'),
- version=dict(default=None, type='str'),
+ name=dict(type='str'),
+ path=dict(type='path'),
+ version=dict(type='str'),
production=dict(default=False, type='bool'),
- executable=dict(default=None, type='path'),
- registry=dict(default=None, type='str'),
+ executable=dict(type='path'),
+ registry=dict(type='str'),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
unsafe_perm=dict(default=False, type='bool'),
@@ -285,34 +293,35 @@ def main():
arg_spec['global'] = dict(default=False, type='bool')
module = AnsibleModule(
argument_spec=arg_spec,
- supports_check_mode=True
+ required_if=[('state', 'absent', ['name'])],
+ supports_check_mode=True,
)
name = module.params['name']
path = module.params['path']
version = module.params['version']
glbl = module.params['global']
- production = module.params['production']
- executable = module.params['executable']
- registry = module.params['registry']
state = module.params['state']
- ignore_scripts = module.params['ignore_scripts']
- unsafe_perm = module.params['unsafe_perm']
- ci = module.params['ci']
- no_optional = module.params['no_optional']
- no_bin_links = module.params['no_bin_links']
if not path and not glbl:
module.fail_json(msg='path must be specified when not using global')
- if state == 'absent' and not name:
- module.fail_json(msg='uninstalling a package is only available for named packages')
- npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
- executable=executable, registry=registry, ignore_scripts=ignore_scripts,
- unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links)
+ npm = Npm(module,
+ name=name,
+ path=path,
+ version=version,
+ glbl=glbl,
+ production=module.params['production'],
+ executable=module.params['executable'],
+ registry=module.params['registry'],
+ ignore_scripts=module.params['ignore_scripts'],
+ unsafe_perm=module.params['unsafe_perm'],
+ state=state,
+ no_optional=module.params['no_optional'],
+ no_bin_links=module.params['no_bin_links'])
changed = False
- if ci:
+ if module.params['ci']:
npm.ci_install()
changed = True
elif state == 'present':
diff --git a/ansible_collections/community/general/plugins/modules/nsupdate.py b/ansible_collections/community/general/plugins/modules/nsupdate.py
index b2a84f76b..63750165c 100644
--- a/ansible_collections/community/general/plugins/modules/nsupdate.py
+++ b/ansible_collections/community/general/plugins/modules/nsupdate.py
@@ -45,29 +45,28 @@ options:
type: str
port:
description:
- - Use this TCP port when connecting to C(server).
+ - Use this TCP port when connecting to O(server).
default: 53
type: int
key_name:
description:
- - Use TSIG key name to authenticate against DNS C(server)
+ - Use TSIG key name to authenticate against DNS O(server)
type: str
key_secret:
description:
- - Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
+ - Use TSIG key secret, associated with O(key_name), to authenticate against O(server)
type: str
key_algorithm:
description:
- - Specify key algorithm used by C(key_secret).
+ - Specify key algorithm used by O(key_secret).
choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
'hmac-sha512']
default: 'hmac-md5'
type: str
zone:
description:
- - DNS record will be modified on this C(zone).
+ - DNS record will be modified on this O(zone).
- When omitted DNS will be queried to attempt finding the correct zone.
- - Starting with Ansible 2.7 this parameter is optional.
type: str
record:
description:
@@ -467,10 +466,8 @@ class RecordManager(object):
if lookup.rcode() != dns.rcode.NOERROR:
self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')
- if self.module.params['type'] == 'NS':
- current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl
- else:
- current_ttl = lookup.answer[0].ttl
+ current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl
+
return current_ttl != self.module.params['ttl']
diff --git a/ansible_collections/community/general/plugins/modules/ocapi_command.py b/ansible_collections/community/general/plugins/modules/ocapi_command.py
index ed2366736..b6b9b6b98 100644
--- a/ansible_collections/community/general/plugins/modules/ocapi_command.py
+++ b/ansible_collections/community/general/plugins/modules/ocapi_command.py
@@ -41,17 +41,17 @@ options:
- Base URI of OOB controller.
type: str
proxy_slot_number:
- description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server.
+ description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server.
type: int
update_image_path:
required: false
description:
- - For C(FWUpload), the path on the local filesystem of the firmware update image.
+ - For O(command=FWUpload), the path on the local filesystem of the firmware update image.
type: str
job_name:
required: false
description:
- - For C(DeleteJob) command, the name of the job to delete.
+ - For O(command=DeleteJob), the name of the job to delete.
type: str
username:
required: true
diff --git a/ansible_collections/community/general/plugins/modules/ocapi_info.py b/ansible_collections/community/general/plugins/modules/ocapi_info.py
index d7dfdccc7..9906d804c 100644
--- a/ansible_collections/community/general/plugins/modules/ocapi_info.py
+++ b/ansible_collections/community/general/plugins/modules/ocapi_info.py
@@ -38,7 +38,7 @@ options:
- Base URI of OOB controller.
type: str
proxy_slot_number:
- description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server.
+ description: For proxied inband requests, the slot number of the IOM. Only applies if O(baseuri) is a proxy server.
type: int
username:
required: true
@@ -83,45 +83,45 @@ msg:
sample: "Action was successful"
percentComplete:
- description: Percent complete of the relevant operation. Applies to C(JobStatus) command.
+ description: Percent complete of the relevant operation. Applies to O(command=JobStatus).
returned: when supported
type: int
sample: 99
operationStatus:
- description: Status of the relevant operation. Applies to C(JobStatus) command. See OCAPI documentation for details.
+ description: Status of the relevant operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
returned: when supported
type: str
sample: "Activate needed"
operationStatusId:
- description: Integer value of status (corresponds to operationStatus). Applies to C(JobStatus) command. See OCAPI documentation for details.
+ description: Integer value of status (corresponds to operationStatus). Applies to O(command=JobStatus). See OCAPI documentation for details.
returned: when supported
type: int
sample: 65540
operationHealth:
- description: Health of the operation. Applies to C(JobStatus) command. See OCAPI documentation for details.
+ description: Health of the operation. Applies to O(command=JobStatus). See OCAPI documentation for details.
returned: when supported
type: str
sample: "OK"
operationHealthId:
description: >
- Integer value for health of the operation (corresponds to C(operationHealth)). Applies to C(JobStatus) command.
+ Integer value for health of the operation (corresponds to RV(operationHealth)). Applies to O(command=JobStatus).
See OCAPI documentation for details.
returned: when supported
type: str
sample: "OK"
details:
- description: Details of the relevant operation. Applies to C(JobStatus) command.
+ description: Details of the relevant operation. Applies to O(command=JobStatus).
returned: when supported
type: list
elements: str
status:
- description: Dict containing status information. See OCAPI documentation for details.
+ description: Dictionary containing status information. See OCAPI documentation for details.
returned: when supported
type: dict
sample: {
diff --git a/ansible_collections/community/general/plugins/modules/oci_vcn.py b/ansible_collections/community/general/plugins/modules/oci_vcn.py
index 4e6487b8f..bf110b94b 100644
--- a/ansible_collections/community/general/plugins/modules/oci_vcn.py
+++ b/ansible_collections/community/general/plugins/modules/oci_vcn.py
@@ -23,12 +23,12 @@ attributes:
support: none
options:
cidr_block:
- description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present).
+ description: The CIDR IP address block of the VCN. Required when creating a VCN with O(state=present).
type: str
required: false
compartment_id:
- description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present).
- This option is mutually exclusive with I(vcn_id).
+ description: The OCID of the compartment to contain the VCN. Required when creating a VCN with O(state=present).
+ This option is mutually exclusive with O(vcn_id).
type: str
display_name:
description: A user-friendly name. Does not have to be unique, and it's changeable.
@@ -42,13 +42,13 @@ options:
with a letter. The value cannot be changed.
type: str
state:
- description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN.
+ description: Create or update a VCN with O(state=present). Use O(state=absent) to delete a VCN.
type: str
default: present
choices: ['present', 'absent']
vcn_id:
- description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN
- with I(state=present). This option is mutually exclusive with I(compartment_id).
+ description: The OCID of the VCN. Required when deleting a VCN with O(state=absent) or updating a VCN
+ with O(state=present). This option is mutually exclusive with O(compartment_id).
type: str
aliases: [ 'id' ]
author: "Rohit Chaware (@rohitChaware)"
diff --git a/ansible_collections/community/general/plugins/modules/odbc.py b/ansible_collections/community/general/plugins/modules/odbc.py
index fbc4b63ae..bc2e89656 100644
--- a/ansible_collections/community/general/plugins/modules/odbc.py
+++ b/ansible_collections/community/general/plugins/modules/odbc.py
@@ -43,12 +43,11 @@ options:
description:
- Perform a commit after the execution of the SQL query.
- Some databases allow a commit after a select whereas others raise an exception.
- - Default is C(true) to support legacy module behavior.
+ - Default is V(true) to support legacy module behavior.
type: bool
default: true
version_added: 1.3.0
requirements:
- - "python >= 2.6"
- "pyodbc"
notes:
diff --git a/ansible_collections/community/general/plugins/modules/one_host.py b/ansible_collections/community/general/plugins/modules/one_host.py
index c4578f950..eea112173 100644
--- a/ansible_collections/community/general/plugins/modules/one_host.py
+++ b/ansible_collections/community/general/plugins/modules/one_host.py
@@ -38,11 +38,11 @@ options:
state:
description:
- Takes the host to the desired lifecycle state.
- - If C(absent) the host will be deleted from the cluster.
- - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
- - If C(enabled) the host is fully operational.
- - C(disabled), e.g. to perform maintenance operations.
- - C(offline), host is totally offline.
+ - If V(absent) the host will be deleted from the cluster.
+ - If V(present) the host will be created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
+ - If V(enabled) the host is fully operational.
+ - V(disabled), e.g. to perform maintenance operations.
+ - V(offline), host is totally offline.
choices:
- absent
- present
diff --git a/ansible_collections/community/general/plugins/modules/one_image.py b/ansible_collections/community/general/plugins/modules/one_image.py
index a50b33e93..a0081a0fe 100644
--- a/ansible_collections/community/general/plugins/modules/one_image.py
+++ b/ansible_collections/community/general/plugins/modules/one_image.py
@@ -29,32 +29,32 @@ options:
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
+ - If not set then the value of the E(ONE_URL) environment variable is used.
type: str
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
- - then the value of the C(ONE_USERNAME) environment variable is used.
+ - then the value of the E(ONE_USERNAME) environment variable is used.
type: str
api_password:
description:
- Password of the user to login into OpenNebula RPC server. If not set
- - then the value of the C(ONE_PASSWORD) environment variable is used.
+ - then the value of the E(ONE_PASSWORD) environment variable is used.
type: str
id:
description:
- - A C(id) of the image you would like to manage.
+ - A O(id) of the image you would like to manage.
type: int
name:
description:
- - A C(name) of the image you would like to manage.
+ - A O(name) of the image you would like to manage.
type: str
state:
description:
- - C(present) - state that is used to manage the image
- - C(absent) - delete the image
- - C(cloned) - clone the image
- - C(renamed) - rename the image to the C(new_name)
+ - V(present) - state that is used to manage the image
+ - V(absent) - delete the image
+ - V(cloned) - clone the image
+ - V(renamed) - rename the image to the O(new_name)
choices: ["present", "absent", "cloned", "renamed"]
default: present
type: str
@@ -65,7 +65,7 @@ options:
new_name:
description:
- A name that will be assigned to the existing or new image.
- - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'.
+ - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'.
type: str
author:
- "Milan Ilic (@ilicmilan)"
diff --git a/ansible_collections/community/general/plugins/modules/one_image_info.py b/ansible_collections/community/general/plugins/modules/one_image_info.py
index 938f0ef2a..c9d7c4035 100644
--- a/ansible_collections/community/general/plugins/modules/one_image_info.py
+++ b/ansible_collections/community/general/plugins/modules/one_image_info.py
@@ -14,7 +14,6 @@ module: one_image_info
short_description: Gather information on OpenNebula images
description:
- Gather information on OpenNebula images.
- - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
requirements:
- pyone
extends_documentation_fragment:
@@ -26,17 +25,17 @@ options:
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
+ - If not set then the value of the E(ONE_URL) environment variable is used.
type: str
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
- - then the value of the C(ONE_USERNAME) environment variable is used.
+ - then the value of the E(ONE_USERNAME) environment variable is used.
type: str
api_password:
description:
- Password of the user to login into OpenNebula RPC server. If not set
- - then the value of the C(ONE_PASSWORD) environment variable is used.
+ - then the value of the E(ONE_PASSWORD) environment variable is used.
type: str
ids:
description:
@@ -46,10 +45,10 @@ options:
elements: str
name:
description:
- - A C(name) of the image whose facts will be gathered.
- - If the C(name) begins with '~' the C(name) will be used as regex pattern
+ - A O(name) of the image whose facts will be gathered.
+ - If the O(name) begins with V(~) the O(name) will be used as regex pattern
- which restricts the list of images (whose facts will be returned) whose names match specified regex.
- - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+ - Also, if the O(name) begins with V(~*) case-insensitive matching will be performed.
- See examples for more details.
type: str
author:
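
A minimal sketch of the regex matching described above; the pattern is illustrative and authentication again falls back to the ONE_* environment variables:

    - name: Gather information on all images whose name starts with "app", case-insensitively
      community.general.one_image_info:
        name: '~*^app'        # leading ~* selects case-insensitive regex matching
      register: app_images
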
diff --git a/ansible_collections/community/general/plugins/modules/one_service.py b/ansible_collections/community/general/plugins/modules/one_service.py
index 4f5143887..81b42c0ec 100644
--- a/ansible_collections/community/general/plugins/modules/one_service.py
+++ b/ansible_collections/community/general/plugins/modules/one_service.py
@@ -26,15 +26,15 @@ options:
description:
- URL of the OpenNebula OneFlow API server.
- It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
- - If not set then the value of the ONEFLOW_URL environment variable is used.
+ - If not set then the value of the E(ONEFLOW_URL) environment variable is used.
type: str
api_username:
description:
- - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used.
+ - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_USERNAME) environment variable is used.
type: str
api_password:
description:
- - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used.
+ - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the E(ONEFLOW_PASSWORD) environment variable is used.
type: str
template_name:
description:
@@ -54,20 +54,20 @@ options:
type: str
unique:
description:
- - Setting I(unique=true) will make sure that there is only one service instance running with a name set with C(service_name) when
- instantiating a service from a template specified with I(template_id) or I(template_name). Check examples below.
+ - Setting O(unique=true) will make sure that there is only one service instance running with a name set with O(service_name) when
+ instantiating a service from a template specified with O(template_id) or O(template_name). Check examples below.
type: bool
default: false
state:
description:
- - C(present) - instantiate a service from a template specified with I(template_id) or I(template_name).
- - C(absent) - terminate an instance of a service specified with I(template_id) or I(template_name).
+ - V(present) - instantiate a service from a template specified with O(template_id) or O(template_name).
+ - V(absent) - terminate an instance of a service specified with O(template_id) or O(template_name).
choices: ["present", "absent"]
default: present
type: str
mode:
description:
- - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ - Set permission mode of a service instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing to group and others.
type: str
owner_id:
description:
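
A minimal sketch of the O(unique) behaviour documented above; the template and service names are placeholders:

    - name: Instantiate a service only if no instance with this name is running yet
      community.general.one_service:
        template_name: my-flow-template   # placeholder OneFlow template
        service_name: frontend
        unique: true
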
diff --git a/ansible_collections/community/general/plugins/modules/one_template.py b/ansible_collections/community/general/plugins/modules/one_template.py
index 97d0f856e..06460fee5 100644
--- a/ansible_collections/community/general/plugins/modules/one_template.py
+++ b/ansible_collections/community/general/plugins/modules/one_template.py
@@ -34,12 +34,12 @@ attributes:
options:
id:
description:
- - A I(id) of the template you would like to manage. If not set then a
- - new template will be created with the given I(name).
+ - A O(id) of the template you would like to manage. If not set then a
+ - new template will be created with the given O(name).
type: int
name:
description:
- - A I(name) of the template you would like to manage. If a template with
+ - A O(name) of the template you would like to manage. If a template with
- the given name does not exist it will be created, otherwise it will be
- managed by this module.
type: str
@@ -49,8 +49,8 @@ options:
type: str
state:
description:
- - C(present) - state that is used to manage the template.
- - C(absent) - delete the template.
+ - V(present) - state that is used to manage the template.
+ - V(absent) - delete the template.
choices: ["present", "absent"]
default: present
type: str
@@ -116,36 +116,36 @@ RETURN = '''
id:
description: template id
type: int
- returned: when I(state=present)
+ returned: when O(state=present)
sample: 153
name:
description: template name
type: str
- returned: when I(state=present)
+ returned: when O(state=present)
sample: app1
template:
description: the parsed template
type: dict
- returned: when I(state=present)
+ returned: when O(state=present)
group_id:
description: template's group id
type: int
- returned: when I(state=present)
+ returned: when O(state=present)
sample: 1
group_name:
description: template's group name
type: str
- returned: when I(state=present)
+ returned: when O(state=present)
sample: one-users
owner_id:
description: template's owner id
type: int
- returned: when I(state=present)
+ returned: when O(state=present)
sample: 143
owner_name:
description: template's owner name
type: str
- returned: when I(state=present)
+ returned: when O(state=present)
sample: ansible-test
'''
diff --git a/ansible_collections/community/general/plugins/modules/one_vm.py b/ansible_collections/community/general/plugins/modules/one_vm.py
index 1bbf47466..8ee9c8560 100644
--- a/ansible_collections/community/general/plugins/modules/one_vm.py
+++ b/ansible_collections/community/general/plugins/modules/one_vm.py
@@ -29,25 +29,25 @@ options:
description:
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
- - transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
+ transferred over the network unencrypted.
+ - If not set then the value of the E(ONE_URL) environment variable is used.
type: str
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
- - then the value of the C(ONE_USERNAME) environment variable is used.
+ then the value of the E(ONE_USERNAME) environment variable is used.
type: str
api_password:
description:
- Password of the user to login into OpenNebula RPC server. If not set
- - then the value of the C(ONE_PASSWORD) environment variable is used.
- - if both I(api_username) or I(api_password) are not set, then it will try
- - authenticate with ONE auth file. Default path is "~/.one/one_auth".
- - Set environment variable C(ONE_AUTH) to override this path.
+ then the value of the E(ONE_PASSWORD) environment variable is used.
+ if both O(api_username) and O(api_password) are not set, then it will try
+ to authenticate with the ONE auth file. Default path is "~/.one/one_auth".
+ - Set environment variable E(ONE_AUTH) to override this path.
type: str
template_name:
description:
- - Name of VM template to use to create a new instace
+ - Name of VM template to use to create a new instance
type: str
template_id:
description:
@@ -60,32 +60,32 @@ options:
type: bool
instance_ids:
description:
- - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+ - 'A list of instance ids used for states: V(absent), V(running), V(rebooted), V(poweredoff).'
aliases: ['ids']
type: list
elements: int
state:
description:
- - C(present) - create instances from a template specified with C(template_id)/C(template_name).
- - C(running) - run instances
- - C(poweredoff) - power-off instances
- - C(rebooted) - reboot instances
- - C(absent) - terminate instances
+ - V(present) - create instances from a template specified with O(template_id)/O(template_name).
+ - V(running) - run instances
+ - V(poweredoff) - power-off instances
+ - V(rebooted) - reboot instances
+ - V(absent) - terminate instances
choices: ["present", "absent", "running", "rebooted", "poweredoff"]
default: present
type: str
hard:
description:
- - Reboot, power-off or terminate instances C(hard)
+ - Reboot, power-off or terminate instances C(hard).
default: false
type: bool
wait:
description:
- Wait for the instance to reach its desired state before returning. Keep
- - in mind if you are waiting for instance to be in running state it
- - doesn't mean that you will be able to SSH on that machine only that
- - boot process have started on that instance, see 'wait_for' example for
- - details.
+ in mind that waiting for the instance to be in the running state does not
+ mean that you will be able to SSH to that machine, only that the boot
+ process has started on that instance; see the 'wait_for' example for
+ details.
default: true
type: bool
wait_timeout:
@@ -96,36 +96,36 @@ options:
attributes:
description:
- A dictionary of key/value attributes to add to new instances, or for
- - setting C(state) of instances with these attributes.
+ setting C(state) of instances with these attributes.
- Keys are case insensitive and OpenNebula automatically converts them to upper case.
- Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
- C(#) character(s) can be appended to the C(NAME) and the module will automatically add
- - indexes to the names of VMs.
+ indexes to the names of VMs.
- For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
- - When used with C(count_attributes) and C(exact_count) the module will
- - match the base name without the index part.
+ - When used with O(count_attributes) and O(exact_count) the module will
+ match the base name without the index part.
default: {}
type: dict
labels:
description:
- A list of labels to associate with new instances, or for setting
- - C(state) of instances with these labels.
+ C(state) of instances with these labels.
default: []
type: list
elements: str
count_attributes:
description:
- A dictionary of key/value attributes that can only be used with
- - C(exact_count) to determine how many nodes based on a specific
- - attributes criteria should be deployed. This can be expressed in
- - multiple ways and is shown in the EXAMPLES section.
+ O(exact_count) to determine how many nodes based on a specific
+ attributes criteria should be deployed. This can be expressed in
+ multiple ways and is shown in the EXAMPLES section.
type: dict
count_labels:
description:
- - A list of labels that can only be used with C(exact_count) to determine
- - how many nodes based on a specific labels criteria should be deployed.
- - This can be expressed in multiple ways and is shown in the EXAMPLES
- - section.
+ - A list of labels that can only be used with O(exact_count) to determine
+ how many nodes based on a specific labels criteria should be deployed.
+ This can be expressed in multiple ways and is shown in the EXAMPLES
+ section.
type: list
elements: str
count:
@@ -135,14 +135,14 @@ options:
type: int
exact_count:
description:
- - Indicates how many instances that match C(count_attributes) and
- - C(count_labels) parameters should be deployed. Instances are either
- - created or terminated based on this value.
- - NOTE':' Instances with the least IDs will be terminated first.
+ - Indicates how many instances that match O(count_attributes) and
+ O(count_labels) parameters should be deployed. Instances are either
+ created or terminated based on this value.
+ - 'B(NOTE:) Instances with the lowest IDs will be terminated first.'
type: int
mode:
description:
- - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ - Set permission mode of the instance in octet format, for example V(0600) to give owner C(use) and C(manage) and nothing to group and others.
type: str
owner_id:
description:
@@ -159,14 +159,14 @@ options:
disk_size:
description:
- The size of the disk created for new instances (in MB, GB, TB,...).
- - NOTE':' If The Template hats Multiple Disks the Order of the Sizes is
- - matched against the order specified in C(template_id)/C(template_name).
+ - 'B(NOTE:) If the template has multiple disks, the order of the sizes is
+ matched against the order specified in O(template_id)/O(template_name).'
type: list
elements: str
cpu:
description:
- Percentage of CPU divided by 100 required for the new instance. Half a
- - processor is written 0.5.
+ processor is written 0.5.
type: float
vcpu:
description:
@@ -183,8 +183,8 @@ options:
- Creates an image from a VM disk.
- It is a dictionary where you have to specify C(name) of the new image.
- Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
- - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
- - and the VM has to be in the C(poweredoff) state.
+ - 'B(NOTE:) This operation will only be performed on the first VM (if more than one VM ID is passed)
+ and the VM has to be in the C(poweredoff) state.'
- Also this operation will fail if an image with specified C(name) already exists.
type: dict
persistent:
@@ -195,17 +195,17 @@ options:
version_added: '0.2.0'
datastore_id:
description:
- - Name of Datastore to use to create a new instace
+ - Name of Datastore to use to create a new instance
version_added: '0.2.0'
type: int
datastore_name:
description:
- - Name of Datastore to use to create a new instace
+ - Name of Datastore to use to create a new instance
version_added: '0.2.0'
type: str
updateconf:
description:
- - When I(instance_ids) is provided, updates running VMs with the C(updateconf) API call.
+ - When O(instance_ids) is provided, updates running VMs with the C(updateconf) API call.
- When new VMs are being created, emulates the C(updateconf) API call via direct template merge.
- Allows for complete modifications of the C(CONTEXT) attribute.
type: dict
@@ -445,12 +445,12 @@ EXAMPLES = '''
RETURN = '''
instances_ids:
- description: a list of instances ids whose state is changed or which are fetched with C(instance_ids) option.
+ description: a list of IDs of instances whose state is changed or which are fetched with the O(instance_ids) option.
type: list
returned: success
sample: [ 1234, 1235 ]
instances:
- description: a list of instances info whose state is changed or which are fetched with C(instance_ids) option.
+ description: a list of info for instances whose state is changed or which are fetched with the O(instance_ids) option.
type: complex
returned: success
contains:
@@ -562,7 +562,7 @@ instances:
tagged_instances:
description:
- A list of instances info based on a specific attributes and/or
- - labels that are specified with C(count_attributes) and C(count_labels)
+ - labels that are specified with O(count_attributes) and O(count_labels)
- options.
type: complex
returned: success
@@ -1390,7 +1390,7 @@ def check_name_attribute(module, attributes):
if attributes.get("NAME"):
import re
if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
- module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
+ module.fail_json(msg="Illegal 'NAME' attribute: '" + attributes.get("NAME") +
"' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
index 37dca74f2..dfcabf6f6 100644
--- a/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
+++ b/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
@@ -110,7 +110,6 @@ options:
requirements:
- "1and1"
- - "python >= 2.6"
author:
- "Amel Ajdinovic (@aajdinov)"
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py b/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
index 7f7af9c4f..da361ef2d 100644
--- a/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
+++ b/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
@@ -40,7 +40,7 @@ options:
api_url:
description:
- Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
+ E(ONEANDONE_API_URL) environment variable.
type: str
required: false
name:
@@ -83,7 +83,7 @@ options:
datacenter:
description:
- ID or country code of the datacenter where the load balancer will be created.
- - If not specified, it defaults to I(US).
+ - If not specified, it defaults to V(US).
type: str
choices: [ "US", "ES", "DE", "GB" ]
required: false
@@ -148,7 +148,6 @@ options:
requirements:
- "1and1"
- - "python >= 2.6"
author:
- Amel Ajdinovic (@aajdinov)
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
index 6118645bf..abdf8ca7a 100644
--- a/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
+++ b/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
@@ -207,7 +207,6 @@ options:
requirements:
- "1and1"
- - "python >= 2.6"
author:
- "Amel Ajdinovic (@aajdinov)"
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_private_network.py b/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
index 114bf2f22..cf74597ed 100644
--- a/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
+++ b/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
@@ -95,7 +95,6 @@ options:
requirements:
- "1and1"
- - "python >= 2.6"
author:
- Amel Ajdinovic (@aajdinov)
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py b/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
index df5476feb..2dceb41bf 100644
--- a/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
+++ b/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
@@ -81,7 +81,6 @@ options:
requirements:
- "1and1"
- - "python >= 2.6"
author:
- Amel Ajdinovic (@aajdinov)
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_server.py b/ansible_collections/community/general/plugins/modules/oneandone_server.py
index 59f504178..b6653b48b 100644
--- a/ansible_collections/community/general/plugins/modules/oneandone_server.py
+++ b/ansible_collections/community/general/plugins/modules/oneandone_server.py
@@ -62,7 +62,7 @@ options:
- The instance size name or ID of the server.
It is required only for 'present' state, and it is mutually exclusive with
vcore, cores_per_processor, ram, and hdds parameters.
- - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
+ - 'The available choices are: V(S), V(M), V(L), V(XL), V(XXL), V(3XL), V(4XL), V(5XL)'
type: str
vcore:
description:
@@ -148,7 +148,6 @@ options:
requirements:
- "1and1"
- - "python >= 2.6"
author:
- "Amel Ajdinovic (@aajdinov)"
diff --git a/ansible_collections/community/general/plugins/modules/onepassword_info.py b/ansible_collections/community/general/plugins/modules/onepassword_info.py
index bb814c443..b63352790 100644
--- a/ansible_collections/community/general/plugins/modules/onepassword_info.py
+++ b/ansible_collections/community/general/plugins/modules/onepassword_info.py
@@ -20,15 +20,12 @@ requirements:
- C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
notes:
- Tested with C(op) version 0.5.5
- - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - "Based on the P(community.general.onepassword#lookup) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
short_description: Gather items from 1Password
description:
- M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
- A fatal error occurs if any of the items being searched for can not be found.
- Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved.
- - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
- You must now use the C(register) option to use the facts in other tasks.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -39,7 +36,7 @@ options:
description:
- A list of one or more search terms.
- Each search term can either be a simple string or it can be a dictionary for more control.
- - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a simple string, O(search_terms[].field) is assumed to be V(password).
- When passing a dictionary, the following fields are available.
suboptions:
name:
@@ -82,7 +79,7 @@ options:
type: str
description:
- The master password for your subdomain.
- - This is always required when specifying C(auto_login).
+ - This is always required when specifying O(auto_login).
required: true
secret_key:
type: str
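
A minimal sketch of the two O(search_terms) forms described above; the item names are placeholders:

    - name: Fetch a password by item name and a specific field via the dictionary form
      community.general.onepassword_info:
        search_terms:
          - KITT                 # simple string, field defaults to password
          - name: Mustang
            field: username      # dictionary form selects a specific field
      register: op_items
      no_log: true
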
diff --git a/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py b/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
index 541f3d669..ed04e2279 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
@@ -13,8 +13,6 @@ module: oneview_datacenter_info
short_description: Retrieve information about the OneView Data Centers
description:
- Retrieve information about the OneView Data Centers.
- - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
requirements:
- "hpOneView >= 2.0.1"
author:
diff --git a/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py b/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
index 3e593b7ae..4e203a50a 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
@@ -14,8 +14,6 @@ module: oneview_enclosure_info
short_description: Retrieve information about one or more Enclosures
description:
- Retrieve information about one or more of the Enclosures from OneView.
- - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
requirements:
- hpOneView >= 2.0.1
author:
@@ -34,7 +32,7 @@ options:
options:
description:
- "List with options to gather additional information about an Enclosure and related resources.
- Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ Options allowed: V(script), V(environmentalConfiguration), and V(utilization). For the option V(utilization),
you can provide specific parameters."
type: list
elements: raw
@@ -77,7 +75,7 @@ EXAMPLES = '''
delegate_to: localhost
register: result
-- name: Print fetched information about paginated, filtered ans sorted list of Enclosures
+- name: Print fetched information about paginated, filtered and sorted list of Enclosures
ansible.builtin.debug:
msg: "{{ result.enclosures }}"
diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
index 8eb63db5a..981d949cd 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
@@ -28,9 +28,9 @@ options:
state:
description:
- Indicates the desired state for the Ethernet Network resource.
- - C(present) will ensure data properties are compliant with OneView.
- - C(absent) will remove the resource from OneView, if it exists.
- - C(default_bandwidth_reset) will reset the network connection template to the default.
+ - V(present) will ensure data properties are compliant with OneView.
+ - V(absent) will remove the resource from OneView, if it exists.
+ - V(default_bandwidth_reset) will reset the network connection template to the default.
type: str
default: present
choices: [present, absent, default_bandwidth_reset]
diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
index e107f3b47..7da008b04 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
@@ -13,8 +13,6 @@ module: oneview_ethernet_network_info
short_description: Retrieve the information about one or more of the OneView Ethernet Networks
description:
- Retrieve the information about one or more of the Ethernet Networks from OneView.
- - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
requirements:
- hpOneView >= 2.0.1
author:
@@ -33,7 +31,7 @@ options:
options:
description:
- "List with options to gather additional information about an Ethernet Network and related resources.
- Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+ Options allowed: V(associatedProfiles) and V(associatedUplinkGroups)."
type: list
elements: str
extends_documentation_fragment:
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
index 4c5f867e2..9f0c4358b 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
@@ -25,8 +25,8 @@ options:
state:
description:
- Indicates the desired state for the Fibre Channel Network resource.
- C(present) will ensure data properties are compliant with OneView.
- C(absent) will remove the resource from OneView, if it exists.
+ V(present) will ensure data properties are compliant with OneView.
+ V(absent) will remove the resource from OneView, if it exists.
type: str
choices: ['present', 'absent']
required: true
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
index d4044b08b..096af4830 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
@@ -13,8 +13,6 @@ module: oneview_fc_network_info
short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
description:
- Retrieve the information about one or more of the Fibre Channel Networks from OneView.
- - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
requirements:
- hpOneView >= 2.0.1
author:
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
index 73eef5af0..e1216b1d9 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
@@ -14,7 +14,7 @@ short_description: Manage OneView FCoE Network resources
description:
- Provides an interface to manage FCoE Network resources. Can create, update, or delete.
requirements:
- - "python >= 2.7.9"
+ - "Python >= 2.7.9"
- "hpOneView >= 4.0.0"
author: "Felipe Bulsoni (@fgbulsoni)"
attributes:
@@ -26,8 +26,8 @@ options:
state:
description:
- Indicates the desired state for the FCoE Network resource.
- C(present) will ensure data properties are compliant with OneView.
- C(absent) will remove the resource from OneView, if it exists.
+ V(present) will ensure data properties are compliant with OneView.
+ V(absent) will remove the resource from OneView, if it exists.
type: str
default: present
choices: ['present', 'absent']
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
index d9ee1b379..b3460d59a 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
@@ -13,8 +13,6 @@ module: oneview_fcoe_network_info
short_description: Retrieve the information about one or more of the OneView FCoE Networks
description:
- Retrieve the information about one or more of the FCoE Networks from OneView.
- - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
requirements:
- hpOneView >= 2.0.1
author:
diff --git a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
index cd8e87528..d1303f011 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
@@ -29,8 +29,8 @@ options:
state:
description:
- Indicates the desired state for the Logical Interconnect Group resource.
- C(absent) will remove the resource from OneView, if it exists.
- C(present) will ensure data properties are compliant with OneView.
+ V(absent) will remove the resource from OneView, if it exists.
+ V(present) will ensure data properties are compliant with OneView.
type: str
choices: [absent, present]
default: present
diff --git a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
index 0111bf2c1..6f6a908f2 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
@@ -14,8 +14,6 @@ module: oneview_logical_interconnect_group_info
short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
description:
- Retrieve information about one or more of the Logical Interconnect Groups from OneView
- - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
requirements:
- hpOneView >= 2.0.1
author:
diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set.py b/ansible_collections/community/general/plugins/modules/oneview_network_set.py
index a6a62a05c..0efd417d6 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_network_set.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_network_set.py
@@ -28,8 +28,8 @@ options:
state:
description:
- Indicates the desired state for the Network Set resource.
- - C(present) will ensure data properties are compliant with OneView.
- - C(absent) will remove the resource from OneView, if it exists.
+ - V(present) will ensure data properties are compliant with OneView.
+ - V(absent) will remove the resource from OneView, if it exists.
type: str
default: present
choices: ['present', 'absent']
diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py b/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
index d1a1f2913..cef53d8fc 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
@@ -13,8 +13,6 @@ module: oneview_network_set_info
short_description: Retrieve information about the OneView Network Sets
description:
- Retrieve information about the Network Sets from OneView.
- - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
requirements:
- hpOneView >= 2.0.1
author:
@@ -34,8 +32,8 @@ options:
options:
description:
- "List with options to gather information about Network Set.
- Option allowed: C(withoutEthernet).
- The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+ Option allowed: V(withoutEthernet).
+ The option V(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
type: list
elements: str
diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
index 65a016b1c..15282aec2 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
@@ -28,9 +28,9 @@ options:
state:
description:
- Indicates the desired state for the Uplink Set resource.
- - C(present) ensures data properties are compliant with OneView.
- - C(absent) removes the resource from OneView, if it exists.
- - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ - V(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
type: str
default: present
choices: [present, absent, connection_information_set]
diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
index 9b00a6bb5..f994280ca 100644
--- a/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
+++ b/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
@@ -13,8 +13,6 @@ module: oneview_san_manager_info
short_description: Retrieve information about one or more of the OneView SAN Managers
description:
- Retrieve information about one or more of the SAN Managers from OneView
- - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
requirements:
- hpOneView >= 2.0.1
author:
@@ -34,10 +32,10 @@ options:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
- - C(start): The first item to return, using 0-based indexing.
- - C(count): The number of resources to return.
- - C(query): A general query string to narrow the list of resources returned.
- - C(sort): The sort order of the returned data set."
+ - V(start): The first item to return, using 0-based indexing.
+ - V(count): The number of resources to return.
+ - V(query): A general query string to narrow the list of resources returned.
+ - V(sort): The sort order of the returned data set."
type: dict
extends_documentation_fragment:
- community.general.oneview
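
A minimal sketch of the O(params) keys listed above; the config path and the sort expression are assumptions for illustration only:

    - name: Gather the first five SAN Managers, sorted by name
      community.general.oneview_san_manager_info:
        config: /etc/oneview/oneview_config.json   # assumed path to a OneView config file
        params:
          start: 0
          count: 5
          sort: 'name:ascending'                   # assumed sort expression
      delegate_to: localhost
      register: result
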
diff --git a/ansible_collections/community/general/plugins/modules/open_iscsi.py b/ansible_collections/community/general/plugins/modules/open_iscsi.py
index af08d1c54..163042cc4 100644
--- a/ansible_collections/community/general/plugins/modules/open_iscsi.py
+++ b/ansible_collections/community/general/plugins/modules/open_iscsi.py
@@ -85,7 +85,7 @@ options:
- Whether the list of target nodes on the portal should be
(re)discovered and added to the persistent iSCSI database.
- Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup)
- to manual, hence combined with I(auto_node_startup=true) will always return
+ to manual; hence, combined with O(auto_node_startup=true), this will always return
a changed state.
type: bool
default: false
@@ -97,7 +97,7 @@ options:
rescan:
description:
- Rescan an established session for discovering new targets.
- - When I(target) is omitted, will rescan all sessions.
+ - When O(target) is omitted, will rescan all sessions.
type: bool
default: false
version_added: 4.1.0
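
A minimal sketch of the O(rescan) behaviour documented above:

    - name: Rescan every established iSCSI session for new targets
      community.general.open_iscsi:
        rescan: true        # with target omitted, all sessions are rescanned
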
diff --git a/ansible_collections/community/general/plugins/modules/openbsd_pkg.py b/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
index 2baea828a..c83113611 100644
--- a/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
+++ b/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
@@ -34,9 +34,9 @@ options:
elements: str
state:
description:
- - C(present) will make sure the package is installed.
- C(latest) will make sure the latest version of the package is installed.
- C(absent) will make sure the specified package is not installed.
+ - V(present) will make sure the package is installed.
+ - V(latest) will make sure the latest version of the package is installed.
+ - V(absent) will make sure the specified package is not installed.
choices: [ absent, latest, present, installed, removed ]
default: present
type: str
@@ -46,19 +46,19 @@ options:
a binary. Requires that the port source tree is already installed.
Automatically builds and installs the 'sqlports' package, if it is
not already installed.
- - Mutually exclusive with I(snapshot).
+ - Mutually exclusive with O(snapshot).
type: bool
default: false
snapshot:
description:
- Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
- - Mutually exclusive with I(build).
+ - Mutually exclusive with O(build).
type: bool
default: false
version_added: 1.3.0
ports_dir:
description:
- - When used in combination with the C(build) option, allows overriding
+ - When used in combination with the O(build) option, allows overriding
the default ports source directory.
default: /usr/ports
type: path
@@ -77,7 +77,7 @@ options:
default: false
notes:
- When used with a C(loop:) each package will be processed individually,
- it is much more efficient to pass the list directly to the I(name) option.
+ it is much more efficient to pass the list directly to the O(name) option.
'''
EXAMPLES = '''
@@ -169,7 +169,11 @@ def get_package_state(names, pkg_spec, module):
rc, stdout, stderr = execute_command(command, module)
if stderr:
- module.fail_json(msg="failed in get_package_state(): " + stderr)
+ match = re.search(r"^Can't find inst:%s$" % re.escape(name), stderr)
+ if match:
+ pkg_spec[name]['installed_state'] = False
+ else:
+ module.fail_json(msg="failed in get_package_state(): " + stderr)
if stdout:
# If the requested package name is just a stem, like "python", we may
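
Following the note above about C(loop:) versus passing a list to O(name), a minimal sketch; the package names are placeholders:

    - name: Install several packages in one call instead of looping
      community.general.openbsd_pkg:
        name:
          - curl
          - rsync
        state: present
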
diff --git a/ansible_collections/community/general/plugins/modules/openwrt_init.py b/ansible_collections/community/general/plugins/modules/openwrt_init.py
index a0e156b33..46fdea5e2 100644
--- a/ansible_collections/community/general/plugins/modules/openwrt_init.py
+++ b/ansible_collections/community/general/plugins/modules/openwrt_init.py
@@ -32,8 +32,9 @@ options:
state:
type: str
description:
- - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
- C(restarted) will always bounce the service. C(reloaded) will always reload.
+ - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
+ - V(restarted) will always bounce the service.
+ - V(reloaded) will always reload.
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
enabled:
description:
@@ -43,7 +44,7 @@ options:
type: str
description:
- If the service does not respond to the 'running' command, name a
- substring to look for as would be found in the output of the I(ps)
+ substring to look for as would be found in the output of the C(ps)
command as a stand-in for a 'running' result. If the string is found,
the service will be assumed to be running.
notes:
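
A minimal sketch of the O(pattern) fallback described above; the service name is an assumed example:

    - name: Ensure a service is running, using a ps substring as the fallback check
      community.general.openwrt_init:
        name: uhttpd         # assumed service name
        state: started
        pattern: uhttpd      # substring looked for in ps output if "running" is not supported
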
diff --git a/ansible_collections/community/general/plugins/modules/opkg.py b/ansible_collections/community/general/plugins/modules/opkg.py
index d2ac314d0..757c88c5d 100644
--- a/ansible_collections/community/general/plugins/modules/opkg.py
+++ b/ansible_collections/community/general/plugins/modules/opkg.py
@@ -46,6 +46,8 @@ options:
force:
description:
- The C(opkg --force) parameter used.
+ - Passing V("") as value and not passing any value at all both have
+ the same effect of B(not) using any C(--force-) parameter.
choices:
- ""
- "depends"
@@ -58,13 +60,17 @@ options:
- "remove"
- "checksum"
- "removal-of-dependent-packages"
- default: ""
type: str
update_cache:
description:
- Update the package DB first.
default: false
type: bool
+ executable:
+ description:
+ - The executable location for C(opkg).
+ type: path
+ version_added: 7.2.0
requirements:
- opkg
- python
@@ -105,6 +111,7 @@ EXAMPLES = '''
force: overwrite
'''
+import os
from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
@@ -114,9 +121,10 @@ class Opkg(StateModuleHelper):
argument_spec=dict(
name=dict(aliases=["pkg"], required=True, type="list", elements="str"),
state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
- force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
- "checksum", "removal-of-dependent-packages"]),
+ force=dict(choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space",
+ "postinstall", "remove", "checksum", "removal-of-dependent-packages"]),
update_cache=dict(default=False, type='bool'),
+ executable=dict(type="path"),
),
)
@@ -137,15 +145,18 @@ class Opkg(StateModuleHelper):
value = None
return cmd_runner_fmt.as_optval("--force-")(value, ctx_ignore_none=True)
+ dir, cmd = os.path.split(self.vars.executable) if self.vars.executable else (None, "opkg")
+
self.runner = CmdRunner(
self.module,
- command="opkg",
+ command=cmd,
arg_formats=dict(
package=cmd_runner_fmt.as_list(),
state=cmd_runner_fmt.as_map(state_map),
force=cmd_runner_fmt.as_func(_force),
- update_cache=cmd_runner_fmt.as_bool("update")
+ update_cache=cmd_runner_fmt.as_bool("update"),
),
+ path_prefix=dir,
)
if self.vars.update_cache:
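
A minimal sketch of the new O(executable) option added above; the binary path is a placeholder:

    - name: Install a package using a non-default opkg binary
      community.general.opkg:
        name: vim
        state: present
        executable: /opt/bin/opkg   # placeholder path, option available since 7.2.0
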
diff --git a/ansible_collections/community/general/plugins/modules/osx_defaults.py b/ansible_collections/community/general/plugins/modules/osx_defaults.py
index 161584373..336e95332 100644
--- a/ansible_collections/community/general/plugins/modules/osx_defaults.py
+++ b/ansible_collections/community/general/plugins/modules/osx_defaults.py
@@ -38,7 +38,7 @@ options:
host:
description:
- The host on which the preference should apply.
- - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
+ - The special value V(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
type: str
key:
description:
@@ -58,13 +58,12 @@ options:
value:
description:
- The value to write.
- - Only required when I(state=present).
+ - Only required when O(state=present).
type: raw
state:
description:
- The state of the user defaults.
- - If set to C(list) will query the given parameter specified by C(key). Returns 'null' is nothing found or mis-spelled.
- - C(list) added in version 2.8.
+ - If set to V(list), it will query the given parameter specified by O(key). Returns V(null) if nothing is found or the key is mis-spelled.
type: str
choices: [ absent, list, present ]
default: present
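
A minimal sketch of the V(list) state described above; the key is an assumed example and the domain is left at its default:

    - name: Read a defaults key without changing it
      community.general.osx_defaults:
        key: AppleMeasurementUnits   # assumed key for illustration
        state: list
      register: measurement_units
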
diff --git a/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py b/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
index cd3639a4c..58d340e3e 100644
--- a/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
+++ b/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
@@ -19,7 +19,7 @@ description:
author: "Pascal HERAUD (@pascalheraud)"
notes:
- Uses the python OVH Api U(https://github.com/ovh/python-ovh).
- You have to create an application (a key and secret) with a consummer
+ You have to create an application (a key and secret) with a consumer
key as described into U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
requirements:
- ovh >= 0.4.8
diff --git a/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py b/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
index 43d64e618..c2f503e3a 100644
--- a/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
+++ b/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
@@ -16,7 +16,7 @@ author: Francois Lallart (@fraff)
version_added: '0.2.0'
short_description: Manage OVH monthly billing
description:
- - Enable monthly billing on OVH cloud intances (be aware OVH does not allow to disable it).
+ - Enable monthly billing on OVH cloud instances (be aware OVH does not allow to disable it).
requirements: [ "ovh" ]
extends_documentation_fragment:
- community.general.attributes
diff --git a/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py b/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
index 47b827908..60d8656ac 100644
--- a/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
+++ b/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
@@ -188,6 +188,8 @@ def main():
if cluster_state == state:
module.exit_json(changed=changed, out=cluster_state)
else:
+ if module.check_mode:
+ module.exit_json(changed=True)
set_cluster(module, state, timeout, force)
cluster_state = get_cluster_status(module)
if cluster_state == state:
@@ -201,12 +203,16 @@ def main():
if node_state[1].strip().lower() == state:
module.exit_json(changed=changed, out=cluster_state)
else:
+ if module.check_mode:
+ module.exit_json(changed=True)
# Set cluster status if needed
set_cluster(module, state, timeout, force)
cluster_state = get_node_status(module, node)
module.exit_json(changed=True, out=cluster_state)
if state in ['restart']:
+ if module.check_mode:
+ module.exit_json(changed=True)
set_cluster(module, 'offline', timeout, force)
cluster_state = get_cluster_status(module)
if cluster_state == 'offline':
@@ -220,6 +226,8 @@ def main():
module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
if state in ['cleanup']:
+ if module.check_mode:
+ module.exit_json(changed=True)
clean_cluster(module, timeout)
cluster_state = get_cluster_status(module)
module.exit_json(changed=True,
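
A minimal sketch exercising the check-mode guards added above; in check mode the module now reports a change without running the underlying cluster commands:

    - name: Preview a cluster cleanup without touching the cluster
      community.general.pacemaker_cluster:
        state: cleanup
      check_mode: true
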
diff --git a/ansible_collections/community/general/plugins/modules/packet_device.py b/ansible_collections/community/general/plugins/modules/packet_device.py
index d220c5f8f..519a7031e 100644
--- a/ansible_collections/community/general/plugins/modules/packet_device.py
+++ b/ansible_collections/community/general/plugins/modules/packet_device.py
@@ -40,7 +40,7 @@ attributes:
options:
auth_token:
description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN).
type: str
count:
@@ -82,8 +82,8 @@ options:
hostnames:
description:
- A hostname of a device, or a list of hostnames.
- - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
- - If only one hostname, it might be expanded to list if I(count)>1.
+ - If given a string or a one-item list, you can use the C("%d") Python string format to expand numbers from O(count).
+ - If only one hostname is given, it might be expanded to a list if O(count)>1.
aliases: [name]
type: list
elements: str
@@ -114,8 +114,8 @@ options:
state:
description:
- Desired state of the device.
- - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
- - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout).
+ - If set to V(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
+ - If set to V(active), the module call will block until all the specified devices are in state active due to the Packet API, or until O(wait_timeout).
choices: [present, absent, active, inactive, rebooted]
default: present
type: str
@@ -135,8 +135,8 @@ options:
wait_timeout:
description:
- - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
- - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice.
+ - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the V(active) state.
+ - If O(wait_for_public_IPv) is set and O(state=active), the module will wait for both events consecutively, applying the timeout twice.
default: 900
type: int
@@ -161,7 +161,7 @@ requirements:
'''
EXAMPLES = '''
-# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# All the examples assume that you have your Packet API token in environment variable PACKET_API_TOKEN.
# You can also pass it to the auth_token parameter of the module instead.
# Creating devices
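
A minimal sketch of the O(hostnames)/O(count) expansion and the V(active) wait documented above; the project, plan, facility and operating system parameters are omitted for brevity:

    - name: Ask for three devices named web0, web1 and web2 and block until they are active
      community.general.packet_device:
        hostnames: "web%d"    # "%d" is expanded using count
        count: 3
        state: active
        wait_timeout: 900
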
diff --git a/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py b/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
index afeb7ea04..530cfe3a7 100644
--- a/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
+++ b/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
@@ -40,7 +40,7 @@ attributes:
options:
auth_token:
description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN).
type: str
hostname:
@@ -77,16 +77,15 @@ options:
state:
description:
- Desired state of the IP subnet on the specified device.
- - With state == C(present), you must specify either hostname or device_id. Subnet with given CIDR will then be assigned to the specified device.
- - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from specified devices.
- - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to.
+ - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR will then be assigned to the specified device.
+ - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet will be removed from specified devices.
+ - If you leave both O(hostname) and O(device_id) empty, the subnet will be removed from any device it's assigned to.
choices: ['present', 'absent']
default: 'present'
type: str
requirements:
- "packet-python >= 1.35"
- - "python >= 2.6"
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/packet_project.py b/ansible_collections/community/general/plugins/modules/packet_project.py
index da4a2bb89..d8c991dba 100644
--- a/ansible_collections/community/general/plugins/modules/packet_project.py
+++ b/ansible_collections/community/general/plugins/modules/packet_project.py
@@ -51,7 +51,7 @@ options:
auth_token:
description:
- - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN).
+ - Packet api token. You can also supply it in environment variable E(PACKET_API_TOKEN).
type: str
name:
@@ -76,7 +76,6 @@ options:
type: str
requirements:
- - "python >= 2.6"
- "packet-python >= 1.40"
'''
diff --git a/ansible_collections/community/general/plugins/modules/packet_sshkey.py b/ansible_collections/community/general/plugins/modules/packet_sshkey.py
index 97f55ba23..6519735dc 100644
--- a/ansible_collections/community/general/plugins/modules/packet_sshkey.py
+++ b/ansible_collections/community/general/plugins/modules/packet_sshkey.py
@@ -32,7 +32,7 @@ options:
type: str
auth_token:
description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN).
type: str
label:
description:
@@ -57,7 +57,6 @@ options:
type: path
requirements:
- - "python >= 2.6"
- packet-python
'''
diff --git a/ansible_collections/community/general/plugins/modules/packet_volume.py b/ansible_collections/community/general/plugins/modules/packet_volume.py
index 910d64b55..659e8d8aa 100644
--- a/ansible_collections/community/general/plugins/modules/packet_volume.py
+++ b/ansible_collections/community/general/plugins/modules/packet_volume.py
@@ -50,7 +50,7 @@ options:
auth_token:
description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN).
type: str
name:
@@ -122,7 +122,6 @@ options:
type: str
requirements:
- - "python >= 2.6"
- "packet-python >= 1.35"
'''
diff --git a/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py b/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
index 7f6c68e05..a46fef55c 100644
--- a/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
+++ b/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
@@ -48,7 +48,7 @@ options:
auth_token:
description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ - Packet API token. You can also supply it in environment variable E(PACKET_API_TOKEN).
type: str
project_id:
@@ -73,7 +73,6 @@ options:
type: str
requirements:
- - "python >= 2.6"
- "packet-python >= 1.35"
'''
diff --git a/ansible_collections/community/general/plugins/modules/pacman.py b/ansible_collections/community/general/plugins/modules/pacman.py
index 66f58155d..7f67b9103 100644
--- a/ansible_collections/community/general/plugins/modules/pacman.py
+++ b/ansible_collections/community/general/plugins/modules/pacman.py
@@ -34,17 +34,17 @@ options:
name:
description:
- Name or list of names of the package(s) or file(s) to install, upgrade, or remove.
- Can't be used in combination with C(upgrade).
+ Cannot be used in combination with O(upgrade).
aliases: [ package, pkg ]
type: list
elements: str
state:
description:
- - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
- - C(present) and C(installed) will simply ensure that a desired package is installed.
- - C(latest) will update the specified package if it is not of the latest available version.
- - C(absent) and C(removed) will remove the specified package.
+ - Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package.
+ - V(present) and V(installed) will simply ensure that a desired package is installed.
+ - V(latest) will update the specified package if it is not of the latest available version.
+ - V(absent) and V(removed) will remove the specified package.
default: present
choices: [ absent, installed, latest, present, removed ]
type: str
@@ -52,9 +52,9 @@ options:
force:
description:
- When removing packages, forcefully remove them, without any checks.
- Same as I(extra_args="--nodeps --nodeps").
- When combined with I(update_cache), force a refresh of all package databases.
- Same as I(update_cache_extra_args="--refresh --refresh").
+ Same as O(extra_args="--nodeps --nodeps").
+ - When combined with O(update_cache), force a refresh of all package databases.
+ Same as O(update_cache_extra_args="--refresh --refresh").
default: false
type: bool
@@ -79,7 +79,7 @@ options:
extra_args:
description:
- - Additional option to pass to pacman when enforcing C(state).
+ - Additional option to pass to pacman when enforcing O(state).
default: ''
type: str
@@ -87,28 +87,28 @@ options:
description:
- Whether or not to refresh the master package lists.
- This can be run as part of a package installation or as a separate step.
- - If not specified, it defaults to C(false).
+ - If not specified, it defaults to V(false).
- Please note that this option only had an influence on the module's C(changed) state
- if I(name) and I(upgrade) are not specified before community.general 5.0.0.
+ if O(name) and O(upgrade) are not specified before community.general 5.0.0.
See the examples for how to keep the old behavior.
type: bool
update_cache_extra_args:
description:
- - Additional option to pass to pacman when enforcing C(update_cache).
+ - Additional option to pass to pacman when enforcing O(update_cache).
default: ''
type: str
upgrade:
description:
- Whether or not to upgrade the whole system.
- Can't be used in combination with C(name).
- - If not specified, it defaults to C(false).
+ Cannot be used in combination with O(name).
+ - If not specified, it defaults to V(false).
type: bool
upgrade_extra_args:
description:
- - Additional option to pass to pacman when enforcing C(upgrade).
+ - Additional option to pass to pacman when enforcing O(upgrade).
default: ''
type: str
@@ -121,8 +121,8 @@ options:
reason_for:
description:
- - Set the install reason for C(all) packages or only for C(new) packages.
- - In case of I(state=latest) already installed packages which will be updated to a newer version are not counted as C(new).
+ - Set the install reason for V(all) packages or only for V(new) packages.
+ - In case of O(state=latest) already installed packages which will be updated to a newer version are not counted as V(new).
default: new
choices: [ all, new ]
type: str
@@ -130,20 +130,23 @@ options:
notes:
- When used with a C(loop:) each package will be processed individually,
- it is much more efficient to pass the list directly to the I(name) option.
- - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand.
+ it is much more efficient to pass the list directly to the O(name) option.
+ - To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand.
For example, a dedicated build user with permissions to install packages could be necessary.
+ - >
+ In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages
+ with the error: C(error: target not found: <pkg>).
"""
RETURN = """
packages:
description:
- A list of packages that have been changed.
- - Before community.general 4.5.0 this was only returned when I(upgrade=true).
+ - Before community.general 4.5.0 this was only returned when O(upgrade=true).
In community.general 4.5.0, it was sometimes omitted when the package list is empty,
- but since community.general 4.6.0 it is always returned when I(name) is specified or
- I(upgrade=true).
- returned: success and I(name) is specified or I(upgrade=true)
+ but since community.general 4.6.0 it is always returned when O(name) is specified or
+ O(upgrade=true).
+ returned: success and O(name) is specified or O(upgrade=true)
type: list
elements: str
sample: [ package, other-package ]
@@ -151,8 +154,8 @@ packages:
cache_updated:
description:
- The changed status of C(pacman -Sy).
- - Useful when I(name) or I(upgrade=true) are specified next to I(update_cache=true).
- returned: success, when I(update_cache=true)
+ - Useful when O(name) or O(upgrade=true) are specified next to O(update_cache=true).
+ returned: success, when O(update_cache=true)
type: bool
sample: false
version_added: 4.6.0
@@ -263,6 +266,7 @@ EXAMPLES = """
reason_for: all
"""
+import re
import shlex
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict, namedtuple
@@ -418,7 +422,7 @@ class Pacman(object):
for p in name_ver:
# With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that.
# When installing from URLs, pacman can also output a 'nothing to do' message. strip that too.
- if "loading packages" in p or "there is nothing to do" in p:
+ if "loading packages" in p or "there is nothing to do" in p or 'Avoid running' in p:
continue
name, version = p.split()
if name in self.inventory["installed_pkgs"]:
@@ -706,11 +710,12 @@ class Pacman(object):
installed_pkgs = {}
dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query"], check_rc=True)
# Format of a line: "pacman 6.0.1-2"
+ query_re = re.compile(r'^\s*(?P<pkg>\S+)\s+(?P<ver>\S+)\s*$')
for l in stdout.splitlines():
- l = l.strip()
- if not l:
+ query_match = query_re.match(l)
+ if not query_match:
continue
- pkg, ver = l.split()
+ pkg, ver = query_match.groups()
installed_pkgs[pkg] = ver
installed_groups = defaultdict(set)
@@ -721,11 +726,12 @@ class Pacman(object):
# base-devel file
# base-devel findutils
# ...
+ query_groups_re = re.compile(r'^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$')
for l in stdout.splitlines():
- l = l.strip()
- if not l:
+ query_groups_match = query_groups_re.match(l)
+ if not query_groups_match:
continue
- group, pkgname = l.split()
+ group, pkgname = query_groups_match.groups()
installed_groups[group].add(pkgname)
available_pkgs = {}
@@ -747,11 +753,12 @@ class Pacman(object):
# vim-plugins vim-airline-themes
# vim-plugins vim-ale
# ...
+ sync_groups_re = re.compile(r'^\s*(?P<group>\S+)\s+(?P<pkg>\S+)\s*$')
for l in stdout.splitlines():
- l = l.strip()
- if not l:
+ sync_groups_match = sync_groups_re.match(l)
+ if not sync_groups_match:
continue
- group, pkg = l.split()
+ group, pkg = sync_groups_match.groups()
available_groups[group].add(pkg)
upgradable_pkgs = {}
@@ -759,9 +766,14 @@ class Pacman(object):
[self.pacman_path, "--query", "--upgrades"], check_rc=False
)
+ stdout = stdout.splitlines()
+ if stdout and "Avoid running" in stdout[0]:
+ stdout = stdout[1:]
+ stdout = "\n".join(stdout)
+
# non-zero exit with nothing in stdout -> nothing to upgrade, all good
# stderr can have warnings, so not checked here
- if rc == 1 and stdout == "":
+ if rc == 1 and not stdout:
pass # nothing to upgrade
elif rc == 0:
# Format of lines:
@@ -771,7 +783,7 @@ class Pacman(object):
l = l.strip()
if not l:
continue
- if "[ignored]" in l:
+ if "[ignored]" in l or "Avoid running" in l:
continue
s = l.split()
if len(s) != 4:
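A minimal standalone sketch of the regex-based parsing introduced in the hunks above, run against hypothetical pacman --query output; the helper name and the sample lines are illustrative and not part of the module:

import re

def parse_query_output(stdout):
    # Same pattern as in the diff: exactly one "name version" pair per line,
    # tolerating surrounding whitespace and skipping anything else, such as
    # warning lines ("Avoid running ...") emitted by some pacman wrappers.
    query_re = re.compile(r'^\s*(?P<pkg>\S+)\s+(?P<ver>\S+)\s*$')
    installed = {}
    for line in stdout.splitlines():
        match = query_re.match(line)
        if not match:
            continue
        installed[match.group('pkg')] = match.group('ver')
    return installed

sample = "Avoid running pacman as the root user\npacman 6.0.1-2\n  zlib 1.2.13-2  \n"
print(parse_query_output(sample))  # {'pacman': '6.0.1-2', 'zlib': '1.2.13-2'}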
diff --git a/ansible_collections/community/general/plugins/modules/pacman_key.py b/ansible_collections/community/general/plugins/modules/pacman_key.py
index 4d4c4afac..4b7b2639e 100644
--- a/ansible_collections/community/general/plugins/modules/pacman_key.py
+++ b/ansible_collections/community/general/plugins/modules/pacman_key.py
@@ -19,10 +19,10 @@ description:
- Add or remove gpg keys from the pacman keyring.
notes:
- Use full-length key ID (40 characters).
- - Keys will be verified when using I(data), I(file), or I(url) unless I(verify) is overridden.
+ - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden.
- Keys will be locally signed after being imported into the keyring.
- - If the key ID exists in the keyring, the key will not be added unless I(force_update) is specified.
- - I(data), I(file), I(url), and I(keyserver) are mutually exclusive.
+ - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified.
+ - O(data), O(file), O(url), and O(keyserver) are mutually exclusive.
requirements:
- gpg
- pacman-key
@@ -73,7 +73,7 @@ options:
keyring:
description:
- The full path to the keyring folder on the remote server.
- - If not specified, module will use pacman's default (C(/etc/pacman.d/gnupg)).
+ - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)).
- Useful if the remote system requires an alternative gnupg directory.
type: path
default: /etc/pacman.d/gnupg
@@ -88,11 +88,13 @@ options:
EXAMPLES = '''
- name: Import a key via local file
community.general.pacman_key:
+ id: 01234567890ABCDE01234567890ABCDE12345678
data: "{{ lookup('file', 'keyfile.asc') }}"
state: present
- name: Import a key via remote file
community.general.pacman_key:
+ id: 01234567890ABCDE01234567890ABCDE12345678
file: /tmp/keyfile.asc
state: present
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty.py b/ansible_collections/community/general/plugins/modules/pagerduty.py
index bed3629be..596c4f4da 100644
--- a/ansible_collections/community/general/plugins/modules/pagerduty.py
+++ b/ansible_collections/community/general/plugins/modules/pagerduty.py
@@ -43,7 +43,7 @@ options:
user:
type: str
description:
- - PagerDuty user ID. Obsolete. Please, use I(token) for authorization.
+ - PagerDuty user ID. Obsolete. Please, use O(token) for authorization.
token:
type: str
description:
@@ -80,7 +80,7 @@ options:
default: Created by Ansible
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_alert.py b/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
index 45bec92c6..3c0327e5a 100644
--- a/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
+++ b/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
@@ -16,6 +16,7 @@ description:
- This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
author:
- "Amanpreet Singh (@ApsOps)"
+ - "Xiao Shen (@xshen1)"
requirements:
- PagerDuty API access
extends_documentation_fragment:
@@ -30,20 +31,25 @@ options:
type: str
description:
- PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ api_key:
+ type: str
+ description:
+      - The PagerDuty API key (read-only access), generated on the PagerDuty site.
+ - Required if O(api_version=v1).
+ integration_key:
+ type: str
+ description:
+ - The GUID of one of your 'Generic API' services.
+      - This is the 'integration key' listed on an 'Integrations' tab of the PagerDuty service.
service_id:
type: str
description:
- ID of PagerDuty service when incidents will be triggered, acknowledged or resolved.
- required: true
+ - Required if O(api_version=v1).
service_key:
type: str
description:
- - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key).
- integration_key:
- type: str
- description:
- - The GUID of one of your "Generic API" services.
- - This is the "integration key" listed on a "Integrations" tab of PagerDuty service.
+ - The GUID of one of your 'Generic API' services. Obsolete. Please use O(integration_key).
state:
type: str
description:
@@ -53,40 +59,96 @@ options:
- 'triggered'
- 'acknowledged'
- 'resolved'
- api_key:
+ api_version:
type: str
description:
- - The pagerduty API key (readonly access), generated on the pagerduty site.
- required: true
+ - The API version we want to use to run the module.
+      - V1 is more limited in the options we can provide when triggering an incident.
+      - V2 has more variables, for example O(severity), O(source), O(custom_details), and so on.
+ default: 'v1'
+ choices:
+ - 'v1'
+ - 'v2'
+ version_added: 7.4.0
+ client:
+ type: str
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ type: str
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+ component:
+ type: str
+ description:
+ - Component of the source machine that is responsible for the event, for example C(mysql) or C(eth0).
+ required: false
+ version_added: 7.4.0
+ custom_details:
+ type: dict
+ description:
+ - Additional details about the event and affected system.
+ - A dictionary with custom keys and values.
+ required: false
+ version_added: 7.4.0
desc:
type: str
description:
- - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+ - For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
The maximum length is 1024 characters.
- - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this event.
required: false
default: Created via Ansible
+ incident_class:
+ type: str
+ description:
+ - The class/type of the event, for example C(ping failure) or C(cpu load).
+ required: false
+ version_added: 7.4.0
incident_key:
type: str
description:
- - Identifies the incident to which this I(state) should be applied.
- - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
- open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
- problem reports.
- - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a
+ - Identifies the incident to which this O(state) should be applied.
+ - For O(state=triggered) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
+ open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to 'de-dup'
+ problem reports. If no O(incident_key) is provided, then it will be generated by PagerDuty.
+ - For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident was first opened by a
trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
required: false
- client:
+ link_url:
type: str
description:
- - The name of the monitoring client that is triggering this event.
+      - Relevant link URL for the alert, for example the website or the job link.
required: false
- client_url:
+ version_added: 7.4.0
+ link_text:
type: str
description:
- - The URL of the monitoring client that is triggering this event.
+      - A short description of the O(link_url).
required: false
+ version_added: 7.4.0
+ source:
+ type: str
+ description:
+ - The unique location of the affected system, preferably a hostname or FQDN.
+      - Required in case of O(state=triggered) and O(api_version=v2).
+ required: false
+ version_added: 7.4.0
+ severity:
+ type: str
+ description:
+ - The perceived severity of the status the event is describing with respect to the affected system.
+      - Required in case of O(state=triggered) and O(api_version=v2).
+ default: 'critical'
+ choices:
+ - 'critical'
+ - 'warning'
+ - 'error'
+ - 'info'
+ version_added: 7.4.0
'''
EXAMPLES = '''
@@ -127,12 +189,50 @@ EXAMPLES = '''
state: resolved
incident_key: somekey
desc: "some text for incident's log"
+
+- name: Trigger a v2 incident with just the basic options
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_version: v2
+ source: My Ansible Script
+ state: triggered
+ desc: problem that led to this trigger
+
+- name: Trigger a v2 incident with more options
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_version: v2
+ source: My Ansible Script
+ state: triggered
+ desc: problem that led to this trigger
+ incident_key: somekey
+ client: Sample Monitoring Service
+ client_url: http://service.example.com
+ component: mysql
+ incident_class: ping failure
+ link_url: https://pagerduty.com
+ link_text: PagerDuty
+
+- name: Acknowledge an incident based on incident_key using v2
+ community.general.pagerduty_alert:
+ api_version: v2
+ integration_key: xxx
+ incident_key: somekey
+ state: acknowledged
+
+- name: Resolve an incident based on incident_key
+ community.general.pagerduty_alert:
+ api_version: v2
+ integration_key: xxx
+ incident_key: somekey
+ state: resolved
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse
+from datetime import datetime
def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url):
@@ -175,8 +275,8 @@ def check(module, name, state, service_id, integration_key, api_key, incident_ke
return incidents[0], False
-def send_event(module, service_key, event_type, desc,
- incident_key=None, client=None, client_url=None):
+def send_event_v1(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
headers = {
"Content-type": "application/json"
@@ -200,61 +300,127 @@ def send_event(module, service_key, event_type, desc,
return json_out
+def send_event_v2(module, service_key, event_type, payload, link,
+ incident_key=None, client=None, client_url=None):
+ url = "https://events.pagerduty.com/v2/enqueue"
+ headers = {
+ "Content-type": "application/json"
+ }
+ data = {
+ "routing_key": service_key,
+ "event_action": event_type,
+ "payload": payload,
+ "client": client,
+ "client_url": client_url,
+ }
+ if link:
+ data["links"] = [link]
+ if incident_key:
+ data["dedup_key"] = incident_key
+ if event_type != "trigger":
+ data.pop("payload")
+ response, info = fetch_url(module, url, method="post",
+ headers=headers, data=json.dumps(data))
+ if info["status"] != 202:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out, True
+
+
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
- service_id=dict(required=True),
- service_key=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
integration_key=dict(required=False, no_log=True),
- api_key=dict(required=True, no_log=True),
- state=dict(required=True,
- choices=['triggered', 'acknowledged', 'resolved']),
- client=dict(required=False, default=None),
- client_url=dict(required=False, default=None),
+ service_id=dict(required=False),
+ service_key=dict(required=False, no_log=True),
+ state=dict(
+ required=True, choices=['triggered', 'acknowledged', 'resolved']
+ ),
+ api_version=dict(type='str', default='v1', choices=['v1', 'v2']),
+ client=dict(required=False),
+ client_url=dict(required=False),
+ component=dict(required=False),
+ custom_details=dict(required=False, type='dict'),
desc=dict(required=False, default='Created via Ansible'),
- incident_key=dict(required=False, default=None, no_log=False)
+ incident_class=dict(required=False),
+ incident_key=dict(required=False, no_log=False),
+ link_url=dict(required=False),
+ link_text=dict(required=False),
+ source=dict(required=False),
+ severity=dict(
+ default='critical', choices=['critical', 'warning', 'error', 'info']
+ ),
),
- supports_check_mode=True
+ required_if=[
+ ('api_version', 'v1', ['service_id', 'api_key']),
+ ('state', 'acknowledged', ['incident_key']),
+ ('state', 'resolved', ['incident_key']),
+ ],
+ required_one_of=[('service_key', 'integration_key')],
+ supports_check_mode=True,
)
name = module.params['name']
- service_id = module.params['service_id']
- integration_key = module.params['integration_key']
- service_key = module.params['service_key']
- api_key = module.params['api_key']
- state = module.params['state']
- client = module.params['client']
- client_url = module.params['client_url']
- desc = module.params['desc']
- incident_key = module.params['incident_key']
-
+ service_id = module.params.get('service_id')
+ integration_key = module.params.get('integration_key')
+ service_key = module.params.get('service_key')
+ api_key = module.params.get('api_key')
+ state = module.params.get('state')
+ client = module.params.get('client')
+ client_url = module.params.get('client_url')
+ desc = module.params.get('desc')
+ incident_key = module.params.get('incident_key')
+ payload = {
+ 'summary': desc,
+ 'source': module.params.get('source'),
+ 'timestamp': datetime.now().isoformat(),
+ 'severity': module.params.get('severity'),
+ 'component': module.params.get('component'),
+ 'class': module.params.get('incident_class'),
+ 'custom_details': module.params.get('custom_details'),
+ }
+ link = {}
+ if module.params.get('link_url'):
+ link['href'] = module.params.get('link_url')
+ if module.params.get('link_text'):
+ link['text'] = module.params.get('link_text')
if integration_key is None:
- if service_key is not None:
- integration_key = service_key
- module.warn('"service_key" is obsolete parameter and will be removed.'
- ' Please, use "integration_key" instead')
- else:
- module.fail_json(msg="'integration_key' is required parameter")
+ integration_key = service_key
+ module.warn(
+            '"service_key" is an obsolete parameter and will be removed.'
+            ' Please use "integration_key" instead'
+ )
state_event_dict = {
'triggered': 'trigger',
'acknowledged': 'acknowledge',
- 'resolved': 'resolve'
+ 'resolved': 'resolve',
}
event_type = state_event_dict[state]
-
- if event_type != 'trigger' and incident_key is None:
- module.fail_json(msg="incident_key is required for "
- "acknowledge or resolve events")
-
- out, changed = check(module, name, state, service_id,
- integration_key, api_key, incident_key)
-
- if not module.check_mode and changed is True:
- out = send_event(module, integration_key, event_type, desc,
- incident_key, client, client_url)
+ if module.params.get('api_version') == 'v1':
+ out, changed = check(module, name, state, service_id,
+ integration_key, api_key, incident_key)
+ if not module.check_mode and changed is True:
+ out = send_event_v1(module, integration_key, event_type, desc,
+ incident_key, client, client_url)
+ else:
+ changed = True
+ if event_type == 'trigger' and not payload['source']:
+            module.fail_json(msg='"source" is a required variable for the v2 API endpoint.')
+ out, changed = send_event_v2(
+ module,
+ integration_key,
+ event_type,
+ payload,
+ link,
+ incident_key,
+ client,
+ client_url,
+ )
module.exit_json(result=out, changed=changed)
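For reference, a hedged sketch of the request body that send_event_v2() above would POST to https://events.pagerduty.com/v2/enqueue for the "basic options" trigger example; the routing key and timestamp are placeholders, and keys the module leaves unset are shown as None. Acknowledge and resolve events drop the payload entirely and pass the incident key as dedup_key, which is why the new argument spec marks incident_key as required for those states.

import json

body = {
    "routing_key": "xxx",                    # integration_key
    "event_action": "trigger",               # mapped from state=triggered
    "payload": {
        "summary": "problem that led to this trigger",  # desc
        "source": "My Ansible Script",
        "timestamp": "2024-01-01T00:00:00",  # datetime.now().isoformat()
        "severity": "critical",              # module default
        "component": None,
        "class": None,
        "custom_details": None,
    },
    "client": None,
    "client_url": None,
}
print(json.dumps(body, indent=2))  # the endpoint answers 202 when the event is accepted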
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_change.py b/ansible_collections/community/general/plugins/modules/pagerduty_change.py
index 6af5d58ea..1a1e50dcf 100644
--- a/ansible_collections/community/general/plugins/modules/pagerduty_change.py
+++ b/ansible_collections/community/general/plugins/modules/pagerduty_change.py
@@ -25,7 +25,7 @@ attributes:
check_mode:
support: full
details:
- - Check mode simply does nothing except returning C(changed=true) in case the I(url) seems to be correct.
+ - Check mode simply does nothing except returning C(changed=true) in case the O(url) seems to be correct.
diff_mode:
support: none
options:
@@ -61,7 +61,7 @@ options:
type: str
environment:
description:
- - The environment name, typically C(production), C(staging), etc.
+ - The environment name, typically V(production), V(staging), and so on.
required: false
type: str
link_url:
@@ -82,7 +82,7 @@ options:
type: str
validate_certs:
description:
- - If C(false), SSL certificates for the target URL will not be validated.
+ - If V(false), SSL certificates for the target URL will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: false
default: true
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_user.py b/ansible_collections/community/general/plugins/modules/pagerduty_user.py
index 9c9805bff..eb8a30956 100644
--- a/ansible_collections/community/general/plugins/modules/pagerduty_user.py
+++ b/ansible_collections/community/general/plugins/modules/pagerduty_user.py
@@ -40,7 +40,7 @@ options:
pd_email:
description:
- The user's email address.
- - I(pd_email) is the unique identifier used and cannot be updated using this module.
+ - O(pd_email) is the unique identifier used and cannot be updated using this module.
required: true
type: str
pd_role:
@@ -52,15 +52,15 @@ options:
state:
description:
- State of the user.
- - On C(present), it creates a user if the user doesn't exist.
- - On C(absent), it removes a user if the account exists.
+ - On V(present), it creates a user if the user doesn't exist.
+ - On V(absent), it removes a user if the account exists.
choices: ['present', 'absent']
default: 'present'
type: str
pd_teams:
description:
- The teams to which the user belongs.
- - Required if I(state=present).
+ - Required if O(state=present).
type: list
elements: str
'''
diff --git a/ansible_collections/community/general/plugins/modules/pam_limits.py b/ansible_collections/community/general/plugins/modules/pam_limits.py
index dbb70045d..f97ea6602 100644
--- a/ansible_collections/community/general/plugins/modules/pam_limits.py
+++ b/ansible_collections/community/general/plugins/modules/pam_limits.py
@@ -15,8 +15,8 @@ author:
- "Sebastien Rohaut (@usawa)"
short_description: Modify Linux PAM limits
description:
- - The C(pam_limits) module modifies PAM limits.
- - The default file is C(/etc/security/limits.conf).
+ - The M(community.general.pam_limits) module modifies PAM limits.
+ - The default file is V(/etc/security/limits.conf).
- For the full documentation, see C(man 5 limits.conf).
extends_documentation_fragment:
- community.general.attributes
@@ -68,8 +68,8 @@ options:
type: str
description:
- The value of the limit.
- - Value must either be C(unlimited), C(infinity) or C(-1), all of which indicate no limit, or a limit of 0 or larger.
- - Value must be a number in the range -20 to 19 inclusive, if I(limit_item) is set to C(nice) or C(priority).
+ - Value must either be V(unlimited), V(infinity) or V(-1), all of which indicate no limit, or a limit of 0 or larger.
+ - Value must be a number in the range -20 to 19 inclusive, if O(limit_item) is set to V(nice) or V(priority).
- Refer to the C(man 5 limits.conf) manual pages for more details.
required: true
backup:
@@ -81,7 +81,7 @@ options:
default: false
use_min:
description:
- - If set to C(true), the minimal value will be used or conserved.
+ - If set to V(true), the minimal value will be used or conserved.
- If the specified value is inferior to the value in the file,
file content is replaced with the new value, else content is not modified.
required: false
@@ -89,7 +89,7 @@ options:
default: false
use_max:
description:
- - If set to C(true), the maximal value will be used or conserved.
+ - If set to V(true), the maximal value will be used or conserved.
- If the specified value is superior to the value in the file,
file content is replaced with the new value, else content is not modified.
required: false
@@ -108,7 +108,7 @@ options:
required: false
default: ''
notes:
- - If I(dest) file does not exist, it is created.
+ - If O(dest) file does not exist, it is created.
'''
EXAMPLES = r'''
@@ -175,7 +175,6 @@ def main():
limits_conf = '/etc/security/limits.conf'
module = AnsibleModule(
- # not checking because of daisy chain to file module
argument_spec=dict(
domain=dict(required=True, type='str'),
limit_type=dict(required=True, type='str', choices=pam_types),
@@ -201,6 +200,7 @@ def main():
new_comment = module.params['comment']
changed = False
+ does_not_exist = False
if os.path.isfile(limits_conf):
if not os.access(limits_conf, os.W_OK):
@@ -208,7 +208,7 @@ def main():
else:
limits_conf_dir = os.path.dirname(limits_conf)
if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
- open(limits_conf, 'a').close()
+ does_not_exist = True
changed = True
else:
module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)
@@ -224,15 +224,20 @@ def main():
space_pattern = re.compile(r'\s+')
+ if does_not_exist:
+ lines = []
+ else:
+ with open(limits_conf, 'rb') as f:
+ lines = list(f)
+
message = ''
- f = open(limits_conf, 'rb')
# Tempfile
nf = tempfile.NamedTemporaryFile(mode='w+')
found = False
new_value = value
- for line in f:
+ for line in lines:
line = to_native(line, errors='surrogate_or_strict')
if line.startswith('#'):
nf.write(line)
@@ -323,17 +328,17 @@ def main():
message = new_limit
nf.write(new_limit)
- f.close()
nf.flush()
- with open(limits_conf, 'r') as content:
- content_current = content.read()
-
with open(nf.name, 'r') as content:
content_new = content.read()
if not module.check_mode:
- # Copy tempfile to newfile
+ if does_not_exist:
+ with open(limits_conf, 'a'):
+ pass
+
+ # Move tempfile to newfile
module.atomic_move(nf.name, limits_conf)
try:
@@ -344,7 +349,7 @@ def main():
res_args = dict(
changed=changed,
msg=message,
- diff=dict(before=content_current, after=content_new),
+ diff=dict(before=b''.join(lines), after=content_new),
)
if backup:
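The pam_limits rework above amounts to: capture the current file as a list of byte lines up front (treating a missing file as empty), build the new content in a temporary file, and only replace the real path outside check mode, with the captured lines doubling as the "before" side of the returned diff. A minimal sketch of that pattern, using os.replace as a stand-in for module.atomic_move and an illustrative transform callable:

import os
import tempfile

def rewrite_file(path, transform):
    # Capture current content first; an absent file behaves like an empty one.
    if os.path.isfile(path):
        with open(path, 'rb') as f:
            lines = list(f)
    else:
        lines = []

    # Assemble the new content in a temporary file.
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as nf:
        for line in lines:
            nf.write(transform(line.decode('utf-8', 'surrogateescape')))
        tmp_name = nf.name

    # Swap the file in as a single step, so readers never see a half-written file.
    os.replace(tmp_name, path)
    return b''.join(lines)  # usable as the "before" side of a diff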
diff --git a/ansible_collections/community/general/plugins/modules/pamd.py b/ansible_collections/community/general/plugins/modules/pamd.py
index 6ffc8624e..0ad4c8787 100644
--- a/ansible_collections/community/general/plugins/modules/pamd.py
+++ b/ansible_collections/community/general/plugins/modules/pamd.py
@@ -37,7 +37,7 @@ options:
type:
description:
- The type of the PAM rule being modified.
- - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ - The O(type), O(control), and O(module_path) options all must match a rule to be modified.
type: str
required: true
choices: [ account, -account, auth, -auth, password, -password, session, -session ]
@@ -46,13 +46,13 @@ options:
- The control of the PAM rule being modified.
- This may be a complicated control with brackets. If this is the case, be
sure to put "[bracketed controls]" in quotes.
- - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ - The O(type), O(control), and O(module_path) options all must match a rule to be modified.
type: str
required: true
module_path:
description:
- The module path of the PAM rule being modified.
- - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ - The O(type), O(control), and O(module_path) options all must match a rule to be modified.
type: str
required: true
new_type:
@@ -70,9 +70,9 @@ options:
type: str
module_arguments:
description:
- - When state is C(updated), the module_arguments will replace existing module_arguments.
- - When state is C(args_absent) args matching those listed in module_arguments will be removed.
- - When state is C(args_present) any args listed in module_arguments are added if
+ - When O(state=updated), the O(module_arguments) will replace existing module_arguments.
+      - When O(state=args_absent), args matching those listed in O(module_arguments) will be removed.
+      - When O(state=args_present), any args listed in O(module_arguments) are added if
missing from the existing rule.
- Furthermore, if the module argument takes a value denoted by C(=),
the value will be changed to that specified in module_arguments.
@@ -80,15 +80,15 @@ options:
elements: str
state:
description:
- - The default of C(updated) will modify an existing rule if type,
+ - The default of V(updated) will modify an existing rule if type,
control and module_path all match an existing rule.
- - With C(before), the new rule will be inserted before a rule matching type,
+ - With V(before), the new rule will be inserted before a rule matching type,
control and module_path.
- - Similarly, with C(after), the new rule will be inserted after an existing rulematching type,
+      - Similarly, with V(after), the new rule will be inserted after an existing rule matching type,
control and module_path.
- - With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified.
- - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored.
- - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4.
+      - With either V(before) or V(after), O(new_type), O(new_control), and O(new_module_path) must all be specified.
+ - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored.
+ - State V(absent) will remove the rule.
type: str
choices: [ absent, before, after, args_absent, args_present, updated ]
default: updated
diff --git a/ansible_collections/community/general/plugins/modules/parted.py b/ansible_collections/community/general/plugins/modules/parted.py
index 8e6038180..382e47a47 100644
--- a/ansible_collections/community/general/plugins/modules/parted.py
+++ b/ansible_collections/community/general/plugins/modules/parted.py
@@ -21,10 +21,10 @@ description:
check the GNU parted manual.
requirements:
- This module requires C(parted) version 1.8.3 and above.
- - Option I(align) (except C(undefined)) requires C(parted) 2.1 or above.
+ - Option O(align) (except V(undefined)) requires C(parted) 2.1 or above.
- If the version of C(parted) is below 3.1, it requires a Linux version running
the C(sysfs) file system C(/sys/).
- - Requires the C(resizepart) command when using the I(resize) parameter.
+ - Requires the C(resizepart) command when using the O(resize) parameter.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -42,7 +42,7 @@ options:
required: true
align:
description:
- - Set alignment for newly created partitions. Use C(undefined) for parted default aligment.
+ - Set alignment for newly created partitions. Use V(undefined) for parted default alignment.
type: str
choices: [ cylinder, minimal, none, optimal, undefined ]
default: optimal
@@ -63,16 +63,16 @@ options:
label:
description:
- Disk label type or partition table to use.
- - If I(device) already contains a different label, it will be changed to I(label)
+ - If O(device) already contains a different label, it will be changed to O(label)
and any previous partitions will be lost.
- - A I(name) must be specified for a C(gpt) partition table.
+ - A O(name) must be specified for a V(gpt) partition table.
type: str
choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
default: msdos
part_type:
description:
- - May be specified only with I(label=msdos) or I(label=dvh).
- - Neither I(part_type) nor I(name) may be used with I(label=sun).
+ - May be specified only with O(label=msdos) or O(label=dvh).
+ - Neither O(part_type) nor O(name) may be used with O(label=sun).
type: str
choices: [ extended, logical, primary ]
default: primary
@@ -82,8 +82,8 @@ options:
that is, the "distance" from the start of the disk. Negative numbers
specify distance from the end of the disk.
- The distance can be specified with all the units supported by parted
- (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
- - Using negative values may require setting of I(fs_type) (see notes).
+ (except compat) and it is case sensitive, for example V(10GiB), V(15%).
+ - Using negative values may require setting of O(fs_type) (see notes).
type: str
default: 0%
part_end:
@@ -92,7 +92,7 @@ options:
that is, the "distance" from the start of the disk. Negative numbers
specify distance from the end of the disk.
- The distance can be specified with all the units supported by parted
- (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+ (except compat) and it is case sensitive, for example V(10GiB), V(15%).
type: str
default: 100%
name:
@@ -106,19 +106,19 @@ options:
state:
description:
- Whether to create or delete a partition.
- - If set to C(info) the module will only return the device information.
+ - If set to V(info) the module will only return the device information.
type: str
choices: [ absent, present, info ]
default: info
fs_type:
description:
- If specified and the partition does not exist, will set filesystem type to given partition.
- - Parameter optional, but see notes below about negative I(part_start) values.
+ - Parameter optional, but see notes below about negative O(part_start) values.
type: str
version_added: '0.2.0'
resize:
description:
- - Call C(resizepart) on existing partitions to match the size specified by I(part_end).
+ - Call C(resizepart) on existing partitions to match the size specified by O(part_end).
type: bool
default: false
version_added: '1.3.0'
@@ -128,9 +128,9 @@ notes:
installed on the system is before version 3.1, the module queries the kernel
through C(/sys/) to obtain disk information. In this case the units CHS and
CYL are not supported.
- - Negative I(part_start) start values were rejected if I(fs_type) was not given.
- This bug was fixed in parted 3.2.153. If you want to use negative I(part_start),
- specify I(fs_type) as well or make sure your system contains newer parted.
+ - Negative O(part_start) start values were rejected if O(fs_type) was not given.
+ This bug was fixed in parted 3.2.153. If you want to use negative O(part_start),
+ specify O(fs_type) as well or make sure your system contains newer parted.
'''
RETURN = r'''
@@ -569,8 +569,18 @@ def parted(script, device, align):
if align == 'undefined':
align_option = ''
+ """
+ Use option --fix (-f) if available. Versions prior
+ to 3.4.64 don't have it. For more information see:
+ http://savannah.gnu.org/news/?id=10114
+ """
+ if parted_version() >= (3, 4, 64):
+ script_option = '-s -f'
+ else:
+ script_option = '-s'
+
if script and not module.check_mode:
- command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script)
+ command = "%s %s -m %s %s -- %s" % (parted_exec, script_option, align_option, device, script)
rc, out, err = module.run_command(command)
if rc != 0:
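The --fix gating above relies on parted_version() returning a comparable tuple. A hypothetical helper showing that tuple comparison in isolation; the real module derives the version from parted's own output, so treat this only as an illustration of the ordering:

def version_tuple(version_string):
    # "3.4.64" -> (3, 4, 64); plain tuple comparison then decides whether --fix is usable.
    return tuple(int(part) for part in version_string.split('.')[:3])

assert version_tuple("3.5.0") >= (3, 4, 64)
assert not version_tuple("3.4.63") >= (3, 4, 64)
script_option = '-s -f' if version_tuple("3.5.0") >= (3, 4, 64) else '-s'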
diff --git a/ansible_collections/community/general/plugins/modules/pear.py b/ansible_collections/community/general/plugins/modules/pear.py
index d7cb01b92..36770de6c 100644
--- a/ansible_collections/community/general/plugins/modules/pear.py
+++ b/ansible_collections/community/general/plugins/modules/pear.py
@@ -48,12 +48,12 @@ options:
description:
- List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
- Prompts will be processed in the same order as the packages list.
- - You can optionnally specify an answer to any question in the list.
+ - You can optionally specify an answer to any question in the list.
- If no answer is provided, the list item will only contain the regular expression.
- "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
- You can provide a list containing items with or without answer.
- A prompt list can be shorter or longer than the packages list but will issue a warning.
- - If you want to specify that a package will not need prompts in the middle of a list, C(null).
+ - If you want to specify that a package will not need prompts in the middle of a list, V(null).
type: list
elements: raw
version_added: 0.2.0
@@ -87,7 +87,7 @@ EXAMPLES = r'''
- name: Install multiple pear/pecl packages at once with prompts.
Prompts will be processed on the same order as the packages order.
If there is more prompts than packages, packages without prompts will be installed without any prompt expected.
- If there is more packages than prompts, additionnal prompts will be ignored.
+ If there is more packages than prompts, additional prompts will be ignored.
community.general.pear:
name: pecl/gnupg, pecl/apcu
state: present
@@ -98,7 +98,7 @@ EXAMPLES = r'''
- name: Install multiple pear/pecl packages at once skipping the first prompt.
Prompts will be processed on the same order as the packages order.
If there is more prompts than packages, packages without prompts will be installed without any prompt expected.
- If there is more packages than prompts, additionnal prompts will be ignored.
+ If there is more packages than prompts, additional prompts will be ignored.
community.general.pear:
name: pecl/gnupg, pecl/apcu
state: present
diff --git a/ansible_collections/community/general/plugins/modules/pids.py b/ansible_collections/community/general/plugins/modules/pids.py
index 665adb142..590f1e85a 100644
--- a/ansible_collections/community/general/plugins/modules/pids.py
+++ b/ansible_collections/community/general/plugins/modules/pids.py
@@ -31,7 +31,7 @@ options:
type: str
version_added: 3.0.0
ignore_case:
- description: Ignore case in pattern if using the I(pattern) option.
+ description: Ignore case in pattern if using the O(pattern) option.
type: bool
default: false
version_added: 3.0.0
diff --git a/ansible_collections/community/general/plugins/modules/pip_package_info.py b/ansible_collections/community/general/plugins/modules/pip_package_info.py
index 2cde7218d..6aea178ce 100644
--- a/ansible_collections/community/general/plugins/modules/pip_package_info.py
+++ b/ansible_collections/community/general/plugins/modules/pip_package_info.py
@@ -21,7 +21,7 @@ options:
clients:
description:
- A list of the pip executables that will be used to get the packages.
- They can be supplied with the full path or just the executable name, for example C(pip3.7).
+ They can be supplied with the full path or just the executable name, for example V(pip3.7).
default: ['pip']
required: false
type: list
diff --git a/ansible_collections/community/general/plugins/modules/pipx.py b/ansible_collections/community/general/plugins/modules/pipx.py
index dfa2f4300..705cc71a7 100644
--- a/ansible_collections/community/general/plugins/modules/pipx.py
+++ b/ansible_collections/community/general/plugins/modules/pipx.py
@@ -30,8 +30,8 @@ options:
default: install
description:
- Desired state for the application.
- - The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively.
- - The state C(latest) is equivalent to executing the task twice, with state C(install) and then C(upgrade).
+ - The states V(present) and V(absent) are aliases to V(install) and V(uninstall), respectively.
+ - The state V(latest) is equivalent to executing the task twice, with state V(install) and then V(upgrade).
It was added in community.general 5.5.0.
name:
type: str
@@ -39,60 +39,60 @@ options:
- >
        The name of the application to be installed. It must be a simple package name.
For passing package specifications or installing from URLs or directories,
- please use the I(source) option.
+ please use the O(source) option.
source:
type: str
description:
- >
If the application source, such as a package with version specifier, or an URL,
directory or any other accepted specification. See C(pipx) documentation for more details.
- - When specified, the C(pipx) command will use I(source) instead of I(name).
+ - When specified, the C(pipx) command will use O(source) instead of O(name).
install_apps:
description:
- Add apps from the injected packages.
- - Only used when I(state=inject).
+ - Only used when O(state=inject).
type: bool
default: false
version_added: 6.5.0
install_deps:
description:
- Include applications of dependent packages.
- - Only used when I(state=install), I(state=latest), or I(state=inject).
+ - Only used when O(state=install), O(state=latest), or O(state=inject).
type: bool
default: false
inject_packages:
description:
- Packages to be injected into an existing virtual environment.
- - Only used when I(state=inject).
+ - Only used when O(state=inject).
type: list
elements: str
force:
description:
- Force modification of the application's virtual environment. See C(pipx) for details.
- - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), I(state=latest), or I(state=inject).
+ - Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject).
type: bool
default: false
include_injected:
description:
- Upgrade the injected packages along with the application.
- - Only used when I(state=upgrade), I(state=upgrade_all), or I(state=latest).
- - This is used with I(state=upgrade) and I(state=latest) since community.general 6.6.0.
+ - Only used when O(state=upgrade), O(state=upgrade_all), or O(state=latest).
+ - This is used with O(state=upgrade) and O(state=latest) since community.general 6.6.0.
type: bool
default: false
index_url:
description:
- Base URL of Python Package Index.
- - Only used when I(state=install), I(state=upgrade), I(state=latest), or I(state=inject).
+ - Only used when O(state=install), O(state=upgrade), O(state=latest), or O(state=inject).
type: str
python:
description:
- Python version to be used when creating the application virtual environment. Must be 3.6+.
- - Only used when I(state=install), I(state=latest), I(state=reinstall), or I(state=reinstall_all).
+ - Only used when O(state=install), O(state=latest), O(state=reinstall), or O(state=reinstall_all).
type: str
system_site_packages:
description:
- Give application virtual environment access to the system site-packages directory.
- - Only used when I(state=install) or I(state=latest).
+ - Only used when O(state=install) or O(state=latest).
type: bool
default: false
version_added: 6.6.0
diff --git a/ansible_collections/community/general/plugins/modules/pipx_info.py b/ansible_collections/community/general/plugins/modules/pipx_info.py
index e2bb7fdae..34f9681b0 100644
--- a/ansible_collections/community/general/plugins/modules/pipx_info.py
+++ b/ansible_collections/community/general/plugins/modules/pipx_info.py
@@ -37,7 +37,7 @@ options:
include_raw:
description:
- Returns the raw output of C(pipx list --json).
- - The raw output is not affected by I(include_deps) or I(include_injected).
+ - The raw output is not affected by O(include_deps) or O(include_injected).
type: bool
default: false
executable:
@@ -51,7 +51,7 @@ notes:
- This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
- >
- This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
+ This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR)
passed using the R(environment Ansible keyword, playbooks_environment).
- This module requires C(pipx) version 0.16.2.1 or above.
- Please note that C(pipx) requires Python 3.6 or above.
@@ -98,20 +98,20 @@ application:
type: str
sample: "3.24.0"
dependencies:
- description: The dependencies of the installed application, when I(include_deps=true).
+ description: The dependencies of the installed application, when O(include_deps=true).
returned: success
type: list
elements: str
sample: ["virtualenv"]
injected:
- description: The injected packages for the installed application, when I(include_injected=true).
+ description: The injected packages for the installed application, when O(include_injected=true).
returned: success
type: dict
sample:
licenses: "0.6.1"
raw_output:
- description: The raw output of the C(pipx list) command, when I(include_raw=true). Used for debugging.
+ description: The raw output of the C(pipx list) command, when O(include_raw=true). Used for debugging.
returned: success
type: dict
diff --git a/ansible_collections/community/general/plugins/modules/pkg5.py b/ansible_collections/community/general/plugins/modules/pkg5.py
index f6bc77a71..c4aace9f2 100644
--- a/ansible_collections/community/general/plugins/modules/pkg5.py
+++ b/ansible_collections/community/general/plugins/modules/pkg5.py
@@ -29,13 +29,13 @@ options:
name:
description:
- An FRMI of the package(s) to be installed/removed/updated.
- - Multiple packages may be specified, separated by C(,).
+ - Multiple packages may be specified, separated by V(,).
required: true
type: list
elements: str
state:
description:
- - Whether to install (I(present), I(latest)), or remove (I(absent)) a package.
+ - Whether to install (V(present), V(latest)), or remove (V(absent)) a package.
choices: [ absent, latest, present, installed, removed, uninstalled ]
default: present
type: str
diff --git a/ansible_collections/community/general/plugins/modules/pkgin.py b/ansible_collections/community/general/plugins/modules/pkgin.py
index c08b25218..5b2e478b8 100644
--- a/ansible_collections/community/general/plugins/modules/pkgin.py
+++ b/ansible_collections/community/general/plugins/modules/pkgin.py
@@ -30,7 +30,7 @@ author:
notes:
- "Known bug with pkgin < 0.8.0: if a package is removed and another
package depends on it, the other package will be silently removed as
- well. New to Ansible 1.9: check-mode support."
+ well."
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -174,6 +174,13 @@ def query_package(module, name):
# '<' - installed but out of date
# '=' - installed and up to date
# '>' - installed but newer than the repository version
+
+ if (package in ('reading local summary...',
+ 'processing local summary...',
+ 'downloading pkg_summary.xz done.')) or \
+ (package.startswith('processing remote summary (')):
+ continue
+
pkgname_with_version, raw_state = package.split(splitchar)[0:2]
# Search for package, stripping version
@@ -317,7 +324,7 @@ def do_upgrade_packages(module, full=False):
format_pkgin_command(module, cmd))
if rc == 0:
- if re.search('^nothing to do.\n$', out):
+ if re.search('^(.*\n|)nothing to do.\n$', out):
module.exit_json(changed=False, msg="nothing left to upgrade")
else:
module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err)
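The widened regular expression above tolerates a single informational line (such as the summary-processing messages filtered out earlier in query_package) before pkgin's "nothing to do." marker. A quick standalone check with hypothetical output strings:

import re

old_pattern = r'^nothing to do.\n$'
new_pattern = r'^(.*\n|)nothing to do.\n$'

out = "reading local summary...\nnothing to do.\n"   # illustrative pkgin output

print(bool(re.search(old_pattern, out)))  # False: the leading line breaks the anchored match
print(bool(re.search(new_pattern, out)))  # True: one optional preceding line is allowed
print(bool(re.search(new_pattern, "nothing to do.\n")))  # True: the original case still matches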
diff --git a/ansible_collections/community/general/plugins/modules/pkgng.py b/ansible_collections/community/general/plugins/modules/pkgng.py
index b9d4422c0..88c9b8e3b 100644
--- a/ansible_collections/community/general/plugins/modules/pkgng.py
+++ b/ansible_collections/community/general/plugins/modules/pkgng.py
@@ -31,13 +31,7 @@ options:
name:
description:
- Name or list of names of packages to install/remove.
- - "With I(name=*), I(state=latest) will operate, but I(state=present) and I(state=absent) will be noops."
- - >
- Warning: In Ansible 2.9 and earlier this module had a misfeature
- where I(name=*) with I(state=latest) or I(state=present) would
- install every package from every package repository, filling up
- the machines disk. Avoid using them unless you are certain that
- your role will only be used with newer versions.
+ - "With O(name=*), O(state=latest) will operate, but O(state=present) and O(state=absent) will be noops."
required: true
aliases: [pkg]
type: list
@@ -45,7 +39,6 @@ options:
state:
description:
- State of the package.
- - 'Note: C(latest) added in 2.7.'
choices: [ 'present', 'latest', 'absent' ]
required: false
default: present
@@ -59,8 +52,8 @@ options:
annotation:
description:
- A list of keyvalue-pairs of the form
- C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
- C(-) denotes removing an annotation, and C(:) denotes modifying an
+ C(<+/-/:><key>[=<value>]). A V(+) denotes adding an annotation, a
+ V(-) denotes removing an annotation, and V(:) denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
@@ -79,19 +72,19 @@ options:
description:
- For pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory.
- - Can not be used together with I(chroot) or I(jail) options.
+ - Can not be used together with O(chroot) or O(jail) options.
required: false
type: path
chroot:
description:
- Pkg will chroot in the specified environment.
- - Can not be used together with I(rootdir) or I(jail) options.
+ - Can not be used together with O(rootdir) or O(jail) options.
required: false
type: path
jail:
description:
- Pkg will execute in the given jail name or id.
- - Can not be used together with I(chroot) or I(rootdir) options.
+ - Can not be used together with O(chroot) or O(rootdir) options.
type: str
autoremove:
description:
@@ -102,7 +95,7 @@ options:
ignore_osver:
description:
- Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches.
- - Defines the C(IGNORE_OSVERSION) environment variable.
+ - Defines the E(IGNORE_OSVERSION) environment variable.
required: false
type: bool
default: false
@@ -111,7 +104,7 @@ author: "bleader (@bleader)"
notes:
- When using pkgsite, be careful that already in cache packages won't be downloaded again.
- When used with a C(loop:) each package will be processed individually,
- it is much more efficient to pass the list directly to the I(name) option.
+ it is much more efficient to pass the list directly to the O(name) option.
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/pkgutil.py b/ansible_collections/community/general/plugins/modules/pkgutil.py
index 5af74c1f3..15f98a9d4 100644
--- a/ansible_collections/community/general/plugins/modules/pkgutil.py
+++ b/ansible_collections/community/general/plugins/modules/pkgutil.py
@@ -36,7 +36,7 @@ options:
name:
description:
- The name of the package.
- - When using I(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil.
+ - When using O(state=latest), this can be V('*'), which updates all installed packages managed by pkgutil.
type: list
required: true
elements: str
@@ -49,19 +49,19 @@ options:
type: str
state:
description:
- - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages.
- - The upgrade (C(latest)) operation will update/install the packages to the latest version available.
+ - Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages.
+ - The upgrade (V(latest)) operation will update/install the packages to the latest version available.
type: str
required: true
choices: [ absent, installed, latest, present, removed ]
update_catalog:
description:
- - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(true).
+ - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to V(true).
type: bool
default: false
force:
description:
- - To allow the update process to downgrade packages to match what is present in the repository, set this to C(true).
+ - To allow the update process to downgrade packages to match what is present in the repository, set this to V(true).
- This is useful for rolling back to stable from testing, or similar operations.
type: bool
default: false
diff --git a/ansible_collections/community/general/plugins/modules/pmem.py b/ansible_collections/community/general/plugins/modules/pmem.py
index d7fcb8e01..4d10c448e 100644
--- a/ansible_collections/community/general/plugins/modules/pmem.py
+++ b/ansible_collections/community/general/plugins/modules/pmem.py
@@ -30,10 +30,10 @@ attributes:
options:
appdirect:
description:
- - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)).
+ - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)).
- Create AppDirect capacity utilizing hardware interleaving across the
requested PMem modules if applicable given the specified target.
- - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100)
+ - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100)
type: int
appdirect_interleaved:
description:
@@ -43,20 +43,20 @@ options:
default: true
memorymode:
description:
- - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)).
+ - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)).
type: int
reserved:
description:
- - Percentage of the capacity to reserve (C(0)-C(100)). I(reserved) will not be mapped
+ - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) will not be mapped
into the system physical address space and will be presented as reserved
capacity with Show Device and Show Memory Resources Commands.
- - I(reserved) will be set automatically if this is not configured.
+ - O(reserved) will be set automatically if this is not configured.
type: int
required: false
socket:
description:
- This enables to set the configuration for each socket by using the socket ID.
- - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100) within one socket.
+ - Total of O(appdirect), O(memorymode) and O(reserved) must be V(100) within one socket.
type: list
elements: dict
suboptions:
@@ -66,7 +66,7 @@ options:
required: true
appdirect:
description:
- - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)) within the socket ID.
+ - Percentage of the total capacity to use in AppDirect Mode (V(0)-V(100)) within the socket ID.
type: int
required: true
appdirect_interleaved:
@@ -77,12 +77,12 @@ options:
default: true
memorymode:
description:
- - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)) within the socket ID.
+ - Percentage of the total capacity to use in Memory Mode (V(0)-V(100)) within the socket ID.
type: int
required: true
reserved:
description:
- - Percentage of the capacity to reserve (C(0)-C(100)) within the socket ID.
+ - Percentage of the capacity to reserve (V(0)-V(100)) within the socket ID.
type: int
namespace:
description:
@@ -104,8 +104,8 @@ options:
choices: ['pmem', 'blk']
size:
description:
- - The size of namespace. This option supports the suffixes C(k) or C(K) or C(KB) for KiB,
- C(m) or C(M) or C(MB) for MiB, C(g) or C(G) or C(GB) for GiB and C(t) or C(T) or C(TB) for TiB.
+ - The size of namespace. This option supports the suffixes V(k) or V(K) or V(KB) for KiB,
+ V(m) or V(M) or V(MB) for MiB, V(g) or V(G) or V(GB) for GiB and V(t) or V(T) or V(TB) for TiB.
- This option is required if multiple namespaces are configured.
- If this option is not set, all of the available space of a region is configured.
type: str
@@ -113,7 +113,7 @@ options:
namespace_append:
description:
- Enable to append the new namespaces to the system.
- - The default is C(false) so the all existing namespaces not listed in I(namespace) are removed.
+ - The default is V(false), so all existing namespaces not listed in O(namespace) are removed.
type: bool
default: false
required: false
@@ -128,8 +128,8 @@ reboot_required:
result:
description:
- Shows the value of AppDirect, Memory Mode and Reserved size in bytes.
- - If I(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID.
- - If I(namespace) argument is provided, shows the detail of each namespace.
+ - If O(socket) argument is provided, shows the values in each socket with C(socket) which contains the socket ID.
+ - If O(namespace) argument is provided, shows the detail of each namespace.
returned: success
type: list
elements: dict
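As a quick illustration of the percentage rule above, a minimal task in which O(appdirect), O(memorymode) and O(reserved) add up to V(100) could look like this (the split itself is arbitrary):

- name: Use 10% AppDirect, 70% Memory Mode and reserve the remaining 20%
  community.general.pmem:
    appdirect: 10
    memorymode: 70
    reserved: 20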
diff --git a/ansible_collections/community/general/plugins/modules/pnpm.py b/ansible_collections/community/general/plugins/modules/pnpm.py
new file mode 100644
index 000000000..315b07ba8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pnpm.py
@@ -0,0 +1,462 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023 Aritra Sen <aretrosen@proton.me>
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: pnpm
+short_description: Manage node.js packages with pnpm
+version_added: 7.4.0
+description:
+ - Manage node.js packages with the L(pnpm package manager, https://pnpm.io/).
+author:
+ - "Aritra Sen (@aretrosen)"
+ - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of a node.js library to install.
+ - All packages in package.json are installed if not provided.
+ type: str
+ required: false
+ alias:
+ description:
+ - Alias of the node.js library.
+ type: str
+ required: false
+ path:
+ description:
+ - The base path to install the node.js libraries.
+ type: path
+ required: false
+ version:
+ description:
+ - The version of the library to be installed, in semver format.
+ type: str
+ required: false
+ global:
+ description:
+ - Install the node.js library globally.
+ required: false
+ default: false
+ type: bool
+ executable:
+ description:
+ - The executable location for pnpm.
+ - By default the executable is looked up in E(PATH); the module fails if it cannot be found.
+ type: path
+ required: false
+ ignore_scripts:
+ description:
+ - Use the C(--ignore-scripts) flag when installing.
+ required: false
+ type: bool
+ default: false
+ no_optional:
+ description:
+ - Do not install optional packages, equivalent to C(--no-optional).
+ required: false
+ type: bool
+ default: false
+ production:
+ description:
+ - Install dependencies in production mode.
+ - Pnpm will ignore any dependencies under C(devDependencies) in package.json.
+ required: false
+ type: bool
+ default: false
+ dev:
+ description:
+ - Install dependencies in development mode.
+ - Pnpm will ignore any regular dependencies in C(package.json).
+ required: false
+ default: false
+ type: bool
+ optional:
+ description:
+ - Install dependencies in optional mode.
+ required: false
+ default: false
+ type: bool
+ state:
+ description:
+ - Installation state of the named node.js library.
+ - If V(absent) is selected, the O(name) option must be provided.
+ type: str
+ required: false
+ default: present
+ choices: ["present", "absent", "latest"]
+requirements:
+ - Pnpm executable present in E(PATH).
+"""
+
+EXAMPLES = """
+- name: Install "tailwindcss" node.js package.
+ community.general.pnpm:
+ name: tailwindcss
+ path: /app/location
+
+- name: Install "tailwindcss" node.js package on version 3.3.2
+ community.general.pnpm:
+ name: tailwindcss
+ version: 3.3.2
+ path: /app/location
+
+- name: Install "tailwindcss" node.js package globally.
+ community.general.pnpm:
+ name: tailwindcss
+ global: true
+
+- name: Install "tailwindcss" node.js package as dev dependency.
+ community.general.pnpm:
+ name: tailwindcss
+ path: /app/location
+ dev: true
+
+- name: Install "tailwindcss" node.js package as optional dependency.
+ community.general.pnpm:
+ name: tailwindcss
+ path: /app/location
+ optional: true
+
+- name: Install "tailwindcss" node.js package version 0.1.3 as tailwind-1
+ community.general.pnpm:
+ name: tailwindcss
+ alias: tailwind-1
+ version: 0.1.3
+ path: /app/location
+
+- name: Remove the globally-installed package "tailwindcss".
+ community.general.pnpm:
+ name: tailwindcss
+ global: true
+ state: absent
+
+- name: Install packages based on package.json.
+ community.general.pnpm:
+ path: /app/location
+
+- name: Update all packages in package.json to their latest version.
+ community.general.pnpm:
+ path: /app/location
+ state: latest
+"""
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class Pnpm(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs["name"]
+ self.alias = kwargs["alias"]
+ self.version = kwargs["version"]
+ self.path = kwargs["path"]
+ self.globally = kwargs["globally"]
+ self.executable = kwargs["executable"]
+ self.ignore_scripts = kwargs["ignore_scripts"]
+ self.no_optional = kwargs["no_optional"]
+ self.production = kwargs["production"]
+ self.dev = kwargs["dev"]
+ self.optional = kwargs["optional"]
+
+ self.alias_name_ver = None
+
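+ # Build the spec handed to `pnpm add`: with an alias it becomes
+ # "alias@npm:name@version" (or "...@latest" when no version is given),
+ # otherwise simply "name@version" / "name@latest".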
+ if self.alias is not None:
+ self.alias_name_ver = self.alias + "@npm:"
+
+ if self.name is not None:
+ self.alias_name_ver = (self.alias_name_ver or "") + self.name
+ if self.version is not None:
+ self.alias_name_ver = self.alias_name_ver + "@" + str(self.version)
+ else:
+ self.alias_name_ver = self.alias_name_ver + "@latest"
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = self.executable + args
+
+ if self.globally:
+ cmd.append("-g")
+
+ if self.ignore_scripts:
+ cmd.append("--ignore-scripts")
+
+ if self.no_optional:
+ cmd.append("--no-optional")
+
+ if self.production:
+ cmd.append("-P")
+
+ if self.dev:
+ cmd.append("-D")
+
+ if self.name and self.optional:
+ cmd.append("-O")
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="Path %s is not a directory" % self.path)
+
+ if not self.alias_name_ver and not os.path.isfile(
+ os.path.join(self.path, "package.json")
+ ):
+ self.module.fail_json(
+ msg="package.json does not exist in provided path"
+ )
+
+ cwd = self.path
+
+ _rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out, err
+
+ return None, None
+
+ def missing(self):
+ if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")):
+ return True
+
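+ # Query the installed packages via `pnpm list --json` and report the package
+ # as missing unless an entry matching the requested name/alias (and version,
+ # if one was given) shows up in one of the dependency groups below.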
+ cmd = ["list", "--json"]
+
+ if self.name is not None:
+ cmd.append(self.name)
+
+ try:
+ out, err = self._exec(cmd, True, False)
+ if err is not None and err != "":
+ raise Exception(out)
+
+ data = json.loads(out)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Failed to parse pnpm output with error %s" % to_native(e)
+ )
+
+ if "error" in data:
+ return True
+
+ data = data[0]
+
+ for typedep in [
+ "dependencies",
+ "devDependencies",
+ "optionalDependencies",
+ "unsavedDependencies",
+ ]:
+ if typedep not in data:
+ continue
+
+ for dep, prop in data[typedep].items():
+ if self.alias is not None and self.alias != dep:
+ continue
+
+ name = prop["from"] if self.alias is not None else dep
+ if self.name != name:
+ continue
+
+ if self.version is None or self.version == prop["version"]:
+ return False
+
+ break
+
+ return True
+
+ def install(self):
+ if self.alias_name_ver is not None:
+ return self._exec(["add", self.alias_name_ver])
+ return self._exec(["install"])
+
+ def update(self):
+ return self._exec(["update", "--latest"])
+
+ def uninstall(self):
+ if self.alias is not None:
+ return self._exec(["remove", self.alias])
+ return self._exec(["remove", self.name])
+
+ def list_outdated(self):
+ if not os.path.isfile(os.path.join(self.path, "pnpm-lock.yaml")):
+ return list()
+
+ cmd = ["outdated", "--format", "json"]
+ try:
+ out, err = self._exec(cmd, True, False)
+
+ # BUG: pnpm sometimes does not report errors cleanly, for example when
+ # plain text output is intermingled with the JSON object
+ if err is not None and err != "":
+ raise Exception(out)
+
+ # HACK: to work around the above, keep only the JSON object from the output
+ data_lines = out.splitlines(True)
+
+ out = None
+ for line in data_lines:
+ if len(line) > 0 and line[0] == "{":
+ out = line
+ continue
+
+ if len(line) > 0 and line[0] == "}":
+ out += line
+ break
+
+ if out is not None:
+ out += line
+
+ data = json.loads(out)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Failed to parse pnpm output with error %s" % to_native(e)
+ )
+
+ return data.keys()
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ alias=dict(default=None),
+ path=dict(default=None, type="path"),
+ version=dict(default=None),
+ executable=dict(default=None, type="path"),
+ ignore_scripts=dict(default=False, type="bool"),
+ no_optional=dict(default=False, type="bool"),
+ production=dict(default=False, type="bool"),
+ dev=dict(default=False, type="bool"),
+ optional=dict(default=False, type="bool"),
+ state=dict(default="present", choices=["present", "absent", "latest"]),
+ )
+ arg_spec["global"] = dict(default=False, type="bool")
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params["name"]
+ alias = module.params["alias"]
+ path = module.params["path"]
+ version = module.params["version"]
+ globally = module.params["global"]
+ ignore_scripts = module.params["ignore_scripts"]
+ no_optional = module.params["no_optional"]
+ production = module.params["production"]
+ dev = module.params["dev"]
+ optional = module.params["optional"]
+ state = module.params["state"]
+
+ if module.params["executable"]:
+ executable = module.params["executable"].split(" ")
+ else:
+ executable = [module.get_bin_path("pnpm", True)]
+
+ if name is None and version is not None:
+ module.fail_json(msg="version is meaningless when name is not provided")
+
+ if name is None and alias is not None:
+ module.fail_json(msg="alias is meaningless when name is not provided")
+
+ if path is None and not globally:
+ module.fail_json(msg="path must be specified when not using global")
+ elif path is not None and globally:
+ module.fail_json(msg="Cannot specify path when doing global installation")
+
+ if globally and (production or dev or optional):
+ module.fail_json(
+ msg="Options production, dev, and optional are meaningless when installing packages globally"
+ )
+
+ if name is not None and path is not None and globally:
+ module.fail_json(msg="path should not be mentioned when installing globally")
+
+ if production and dev and optional:
+ module.fail_json(
+ msg="Options production, dev, and optional don't go together"
+ )
+
+ if production and dev:
+ module.fail_json(msg="Options production and dev don't go together")
+
+ if production and optional:
+ module.fail_json(msg="Options production and optional don't go together")
+
+ if dev and optional:
+ module.fail_json(msg="Options dev and optional don't go together")
+
+ if name is not None and name[0:4] == "http" and version is not None:
+ module.fail_json(msg="Semver not supported on remote url downloads")
+
+ if name is None and optional:
+ module.fail_json(
+ msg="Optional not available when package name not provided, use no_optional instead"
+ )
+
+ if state == "absent" and name is None:
+ module.fail_json(msg="Package name is required for uninstalling")
+
+ if globally:
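+ # `pnpm root -g` prints the global node_modules directory; its parent is used
+ # as the working path for the rest of the module.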
+ _rc, out, _err = module.run_command(executable + ["root", "-g"], check_rc=True)
+ path, _tail = os.path.split(out.strip())
+
+ pnpm = Pnpm(
+ module,
+ name=name,
+ alias=alias,
+ path=path,
+ version=version,
+ globally=globally,
+ executable=executable,
+ ignore_scripts=ignore_scripts,
+ no_optional=no_optional,
+ production=production,
+ dev=dev,
+ optional=optional,
+ )
+
+ changed = False
+ out = ""
+ err = ""
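+ # state=present installs only when the package (or lockfile) is missing,
+ # state=latest additionally updates when something is reported outdated,
+ # and state=absent removes the named package.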
+ if state == "present":
+ if pnpm.missing():
+ changed = True
+ out, err = pnpm.install()
+ elif state == "latest":
+ outdated = pnpm.list_outdated()
+ if name is not None:
+ if pnpm.missing() or name in outdated:
+ changed = True
+ out, err = pnpm.install()
+ elif len(outdated):
+ changed = True
+ out, err = pnpm.update()
+ else: # absent
+ if not pnpm.missing():
+ changed = True
+ out, err = pnpm.uninstall()
+
+ module.exit_json(changed=changed, out=out, err=err)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/portage.py b/ansible_collections/community/general/plugins/modules/portage.py
index 1c6b36537..112f6d2d7 100644
--- a/ansible_collections/community/general/plugins/modules/portage.py
+++ b/ansible_collections/community/general/plugins/modules/portage.py
@@ -33,7 +33,7 @@ attributes:
options:
package:
description:
- - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ - Package atom or set, for example V(sys-apps/foo) or V(>foo-2.13) or V(@world)
aliases: [name]
type: list
elements: str
@@ -124,8 +124,8 @@ options:
sync:
description:
- Sync package repositories first
- - If C(yes), perform "emerge --sync"
- - If C(web), perform "emerge-webrsync"
+ - If V(yes), perform "emerge --sync"
+ - If V(web), perform "emerge-webrsync"
choices: [ "web", "yes", "no" ]
type: str
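As a quick illustration of the V(web) choice described above, a minimal sync-only task could look like this (a sketch only; it assumes no package operation is wanted in the same step):

- name: Refresh the Portage tree with emerge-webrsync
  community.general.portage:
    sync: web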
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_org.py b/ansible_collections/community/general/plugins/modules/pritunl_org.py
index df2df4494..4945a8fc2 100644
--- a/ansible_collections/community/general/plugins/modules/pritunl_org.py
+++ b/ansible_collections/community/general/plugins/modules/pritunl_org.py
@@ -37,9 +37,9 @@ options:
type: bool
default: false
description:
- - If I(force) is C(true) and I(state) is C(absent), the module
+ - If O(force) is V(true) and O(state) is V(absent), the module
will delete the organization, no matter if it contains users
- or not. By default I(force) is C(false), which will cause the
+ or not. By default O(force) is V(false), which will cause the
module to fail the deletion of the organization when it contains
users.
@@ -50,9 +50,9 @@ options:
- present
- absent
description:
- - If C(present), the module adds organization I(name) to
- Pritunl. If C(absent), attempt to delete the organization
- from Pritunl (please read about I(force) usage).
+ - If V(present), the module adds organization O(name) to
+ Pritunl. If V(absent), attempt to delete the organization
+ from Pritunl (please read about O(force) usage).
"""
EXAMPLES = """
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user.py b/ansible_collections/community/general/plugins/modules/pritunl_user.py
index 5aac23393..bdbc335d9 100644
--- a/ansible_collections/community/general/plugins/modules/pritunl_user.py
+++ b/ansible_collections/community/general/plugins/modules/pritunl_user.py
@@ -40,9 +40,9 @@ options:
- present
- absent
description:
- - If C(present), the module adds user I(user_name) to
- the Pritunl I(organization). If C(absent), removes the user
- I(user_name) from the Pritunl I(organization).
+ - If V(present), the module adds user O(user_name) to
+ the Pritunl O(organization). If V(absent), removes the user
+ O(user_name) from the Pritunl O(organization).
user_name:
type: str
@@ -56,7 +56,7 @@ options:
required: false
default: null
description:
- - Email address associated with the user I(user_name).
+ - Email address associated with the user O(user_name).
user_type:
type: str
@@ -66,7 +66,7 @@ options:
- client
- server
description:
- - Type of the user I(user_name).
+ - Type of the user O(user_name).
user_groups:
type: list
@@ -74,27 +74,27 @@ options:
required: false
default: null
description:
- - List of groups associated with the user I(user_name).
+ - List of groups associated with the user O(user_name).
user_disabled:
type: bool
required: false
default: null
description:
- - Enable/Disable the user I(user_name).
+ - Enable/Disable the user O(user_name).
user_gravatar:
type: bool
required: false
default: null
description:
- - Enable/Disable Gravatar usage for the user I(user_name).
+ - Enable/Disable Gravatar usage for the user O(user_name).
user_mac_addresses:
type: list
elements: str
description:
- - Allowed MAC addresses for the user I(user_name).
+ - Allowed MAC addresses for the user O(user_name).
version_added: 5.0.0
"""
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user_info.py b/ansible_collections/community/general/plugins/modules/pritunl_user_info.py
index 7b0399061..3f8f62003 100644
--- a/ansible_collections/community/general/plugins/modules/pritunl_user_info.py
+++ b/ansible_collections/community/general/plugins/modules/pritunl_user_info.py
@@ -43,7 +43,7 @@ options:
- client
- server
description:
- - Type of the user I(user_name).
+ - Type of the user O(user_name).
"""
EXAMPLES = """
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks.py b/ansible_collections/community/general/plugins/modules/profitbricks.py
index c8bcceb93..875bd78c4 100644
--- a/ansible_collections/community/general/plugins/modules/profitbricks.py
+++ b/ansible_collections/community/general/plugins/modules/profitbricks.py
@@ -130,7 +130,7 @@ options:
state:
description:
- create or terminate instances
- - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
+ - 'The choices available are: V(running), V(stopped), V(absent), V(present).'
type: str
default: 'present'
disk_type:
@@ -142,7 +142,6 @@ options:
requirements:
- "profitbricks"
- - "python >= 2.6"
author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
'''
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py b/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
index a096db752..4aa1fa5ee 100644
--- a/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
@@ -63,7 +63,7 @@ options:
state:
description:
- Create or terminate datacenters.
- - "The available choices are: C(present), C(absent)."
+ - "The available choices are: V(present), V(absent)."
type: str
required: false
default: 'present'
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_nic.py b/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
index 17a30b052..9498be15d 100644
--- a/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
@@ -65,7 +65,7 @@ options:
state:
description:
- Indicate desired state of the resource
- - "The available choices are: C(present), C(absent)."
+ - "The available choices are: V(present), V(absent)."
type: str
required: false
default: 'present'
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
index f9d257b68..f623da712 100644
--- a/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
@@ -68,7 +68,7 @@ options:
licence_type:
description:
- The licence type for the volume. This is used when the image is non-standard.
- - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
+ - "The available choices are: V(LINUX), V(WINDOWS), V(UNKNOWN), V(OTHER)."
type: str
required: false
default: UNKNOWN
@@ -113,7 +113,7 @@ options:
state:
description:
- create or terminate datacenters
- - "The available choices are: C(present), C(absent)."
+ - "The available choices are: V(present), V(absent)."
type: str
required: false
default: 'present'
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
index 75cd73df3..76459515e 100644
--- a/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
@@ -58,7 +58,7 @@ options:
state:
description:
- Indicate desired state of the resource
- - "The available choices are: C(present), C(absent)."
+ - "The available choices are: V(present), V(absent)."
type: str
required: false
default: 'present'
diff --git a/ansible_collections/community/general/plugins/modules/proxmox.py b/ansible_collections/community/general/plugins/modules/proxmox.py
index 315ee601a..47f3faa4f 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox.py
@@ -13,9 +13,9 @@ DOCUMENTATION = '''
module: proxmox
short_description: Management of instances in Proxmox VE cluster
description:
- - allows you to create/delete/stop instances in Proxmox VE cluster
- - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
- - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior).
+ - Allows you to create/delete/stop instances in a Proxmox VE cluster.
+ - The module automatically detects containerization type (lxc for PVE 4, openvz for older).
+ - From community.general 4.0.0 on, there are no more default values; see O(proxmox_default_behavior).
attributes:
check_mode:
support: none
@@ -29,45 +29,46 @@ options:
hostname:
description:
- the instance hostname
- - required only for C(state=present)
+ - required only for O(state=present)
- must be unique if vmid is not passed
type: str
ostemplate:
description:
- the template for VM creating
- - required only for C(state=present)
+ - required only for O(state=present)
type: str
disk:
description:
- This option was previously described as "hard disk size in GB for instance" however several formats describing
a lxc mount are permitted.
- - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically
+ - Older versions of Proxmox will accept a numeric value for size using the O(storage) parameter to automatically
choose which storage to allocate from, however new versions enforce the C(<STORAGE>:<SIZE>) syntax.
- "Additional options are available by using some combination of the following key-value pairs as a
comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
[,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
- See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(3).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(3).
+ - Should not be used in conjunction with O(storage).
type: str
cores:
description:
- Specify number of cores per socket.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1).
type: int
cpus:
description:
- numbers of allocated cpus for instance
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1).
type: int
memory:
description:
- memory size in MB for instance
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(512).
type: int
swap:
description:
- swap memory size in MB for instance
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(0).
type: int
netif:
description:
@@ -80,6 +81,15 @@ options:
type: list
elements: str
version_added: 2.0.0
+ startup:
+ description:
+ - Specifies the startup order of the container.
+ - Use C(order=#) where C(#) is a non-negative number to define the general startup order. Shutdown is done with reverse ordering.
+ - Use C(up=#) where C(#) is in seconds, to specify a delay to wait before the next VM is started.
+ - Use C(down=#) where C(#) is in seconds, to specify a delay to wait before the next VM is stopped.
+ type: list
+ elements: str
+ version_added: 8.5.0
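As a quick illustration of the O(startup) format described above, a task updating an existing container could look like this (connection values and the chosen order/delay are placeholders):

- name: Boot the container third, waiting 30 seconds before the next guest starts
  community.general.proxmox:
    vmid: 100
    node: uk-mc02
    api_user: root@pam
    api_password: 1q2w3e
    api_host: node1
    hostname: example.org
    startup:
      - order=3
      - up=30
    update: true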
mounts:
description:
- specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points
@@ -91,17 +101,26 @@ options:
onboot:
description:
- specifies whether a VM will be started during system bootup
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
type: bool
storage:
description:
- target storage
+ - Should not be used in conjunction with O(disk).
type: str
default: 'local'
+ ostype:
+ description:
+ - Specifies the C(ostype) of the LXC container.
+ - If set to V(auto), no C(ostype) will be provided on instance creation.
+ choices: ['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged']
+ type: str
+ default: 'auto'
+ version_added: 8.1.0
cpuunits:
description:
- CPU weight for a VM
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1000).
type: int
nameserver:
description:
@@ -114,7 +133,7 @@ options:
tags:
description:
- List of tags to apply to the container.
- - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
+ - Tags must start with V([a-z0-9_]) followed by zero or more of the following characters V([a-z0-9_-+.]).
- Tags are only available in Proxmox 7+.
type: list
elements: str
@@ -124,12 +143,18 @@ options:
- timeout for operations
type: int
default: 30
+ update:
+ description:
+ - If V(true), the container will be updated with new values.
+ type: bool
+ default: false
+ version_added: 8.1.0
force:
description:
- - forcing operations
- - can be used only with states C(present), C(stopped), C(restarted)
- - with C(state=present) force option allow to overwrite existing container
- - with states C(stopped) , C(restarted) allow to force stop instance
+ - Forcing operations.
+ - Can be used only with states V(present), V(stopped), V(restarted).
+ - With O(state=present), the force option allows overwriting an existing container.
+ - With states V(stopped) and V(restarted), it allows force-stopping the instance.
type: bool
default: false
purge:
@@ -137,15 +162,16 @@ options:
- Remove container from all related configurations.
- For example backup jobs, replication jobs, or HA.
- Related ACLs and Firewall entries will always be removed.
- - Used with state C(absent).
+ - Used with O(state=absent).
type: bool
default: false
version_added: 2.3.0
state:
description:
- Indicate desired state of the instance
+ - V(template) was added in community.general 8.1.0.
type: str
- choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'template']
default: present
pubkey:
description:
@@ -154,10 +180,9 @@ options:
unprivileged:
description:
- Indicate if the container should be unprivileged.
- - >
- The default value for this parameter is C(false) but that is deprecated
- and it will be replaced with C(true) in community.general 7.0.0.
+ - The default changed to V(true) in community.general 7.0.0. It used to be V(false) before.
type: bool
+ default: true
description:
description:
- Specify the description for the container. Only used on the configuration web interface.
@@ -169,15 +194,25 @@ options:
- Script that will be executed during various steps in the containers lifetime.
type: str
version_added: '0.2.0'
+ timezone:
+ description:
+ - Timezone used by the container, accepts values like V(Europe/Paris).
+ - The special value V(host) configures the same timezone used by Proxmox host.
+ type: str
+ version_added: '7.1.0'
proxmox_default_behavior:
description:
- As of community.general 4.0.0, various options no longer have default values.
These default values caused problems when users expected different behavior from Proxmox
by default or filled options which caused problems when set.
- - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
- are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
+ - The value V(compatibility) (default before community.general 4.0.0) will ensure that the default values
+ are used when the values are not explicitly specified by the user. The new default is V(no_defaults),
which makes sure these options have no defaults.
- - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
+ - This affects the O(disk), O(cores), O(cpus), O(memory), O(onboot), O(swap), and O(cpuunits) options.
+ - >
+ This parameter is now B(deprecated) and it will be removed in community.general 10.0.0.
+ By then, the module's behavior should be to not set default values, equivalent to V(no_defaults).
+ If a consistent set of defaults is needed, the playbook or role should be responsible for setting it.
type: str
default: no_defaults
choices:
@@ -187,23 +222,25 @@ options:
clone:
description:
- ID of the container to be cloned.
- - I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified.
- - The type of clone created is defined by the I(clone_type) parameter.
+ - O(description), O(hostname), and O(pool) will be copied from the cloned container if not specified.
+ - The type of clone created is defined by the O(clone_type) parameter.
- This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
type: int
version_added: 4.3.0
clone_type:
description:
- Type of the clone created.
- - C(full) creates a full clone, and I(storage) must be specified.
- - C(linked) creates a linked clone, and the cloned container must be a template container.
- - C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
- I(storage) may be specified, if not it will fall back to the default.
+ - V(full) creates a full clone, and O(storage) must be specified.
+ - V(linked) creates a linked clone, and the cloned container must be a template container.
+ - V(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
+ O(storage) may be specified, if not it will fall back to the default.
type: str
choices: ['full', 'linked', 'opportunistic']
default: opportunistic
version_added: 4.3.0
author: Sergei Antipov (@UnderGreen)
+seealso:
+ - module: community.general.proxmox_vm_info
extends_documentation_fragment:
- community.general.proxmox.documentation
- community.general.proxmox.selection
@@ -222,6 +259,18 @@ EXAMPLES = r'''
hostname: example.org
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+- name: Create new container with minimal options specifying disk storage location and size
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ disk: 'local-lvm:20'
+
- name: Create new container with hookscript and description
community.general.proxmox:
vmid: 100
@@ -300,7 +349,7 @@ EXAMPLES = r'''
api_host: node1
password: 123456
hostname: example.org
- ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
- name: Create new container with minimal options defining a cpu core limit
@@ -312,9 +361,21 @@ EXAMPLES = r'''
api_host: node1
password: 123456
hostname: example.org
- ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
cores: 2
+- name: Create new container with minimal options and same timezone as proxmox host
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ timezone: host
+
- name: Create a new container with nesting enabled and allows the use of CIFS/NFS inside the container.
community.general.proxmox:
vmid: 100
@@ -324,7 +385,7 @@ EXAMPLES = r'''
api_host: node1
password: 123456
hostname: example.org
- ostemplate: local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
features:
- nesting=1
- mount=cifs,nfs
@@ -352,6 +413,16 @@ EXAMPLES = r'''
hostname: clone.example.org
storage: local
+- name: Update container configuration
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0"}'
+ update: true
+
- name: Start container
community.general.proxmox:
vmid: 100
@@ -396,6 +467,23 @@ EXAMPLES = r'''
api_host: node1
state: restarted
+- name: Convert container to template
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: template
+
+- name: Convert container to template (stop container if running)
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: template
+ force: true
+
- name: Remove container
community.general.proxmox:
vmid: 100
@@ -427,27 +515,105 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
"""Check if the specified container is a template."""
proxmox_node = self.proxmox_api.nodes(node)
config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
- return config['template']
+ return config.get('template', False)
+
+ def update_config(self, vmid, node, disk, cpus, memory, swap, **kwargs):
+ if VZ_TYPE != "lxc":
+ self.module.fail_json(
+ changed=False,
+ msg="Updating configuration is only supported for LXC enabled proxmox clusters.",
+ )
+
+ # Version limited features
+ minimum_version = {"tags": "6.1", "timezone": "6.3"}
+ proxmox_node = self.proxmox_api.nodes(node)
+
+ pve_version = self.version()
+
+ # Fail on unsupported features
+ for option, version in minimum_version.items():
+ if pve_version < LooseVersion(version) and option in kwargs:
+ self.module.fail_json(
+ changed=False,
+ msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}".format(
+ option=option, version=version, pve_version=pve_version
+ ),
+ )
+
+ # Remove all empty kwarg entries
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if cpus is not None:
+ kwargs["cpulimit"] = cpus
+ if disk is not None:
+ kwargs["rootfs"] = disk
+ if memory is not None:
+ kwargs["memory"] = memory
+ if swap is not None:
+ kwargs["swap"] = swap
+ if "netif" in kwargs:
+ kwargs.update(kwargs["netif"])
+ del kwargs["netif"]
+ if "mounts" in kwargs:
+ kwargs.update(kwargs["mounts"])
+ del kwargs["mounts"]
+ # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string
+ if "tags" in kwargs:
+ re_tag = re.compile(r"^[a-z0-9_][a-z0-9_\-\+\.]*$")
+ for tag in kwargs["tags"]:
+ if not re_tag.match(tag):
+ self.module.fail_json(msg="%s is not a valid tag" % tag)
+ kwargs["tags"] = ",".join(kwargs["tags"])
+
+ # fetch the current config
+ current_config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
+
+ # compare the requested config against the current
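+ # Note: only the keys requested by the user are compared; settings that exist
+ # solely in the current config are left untouched and never trigger an update.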
+ update_config = False
+ for (arg, value) in kwargs.items():
+ # if the arg isn't in the current config, it needs to be updated
+ if arg not in current_config:
+ update_config = True
+ break
+ # some values are comma-separated strings whose item order may differ, so split them and compare item by item
+ if isinstance(value, str):
+ current_values = current_config[arg].split(",")
+ requested_values = value.split(",")
+ for new_value in requested_values:
+ if new_value not in current_values:
+ update_config = True
+ break
+ # if the value is not a comma-separated string, just compare it directly
+ else:
+ # some types don't match with the API, so forcing to string for comparison
+ if str(value) != str(current_config[arg]):
+ update_config = True
+ break
+
+ if update_config:
+ getattr(proxmox_node, VZ_TYPE)(vmid).config.put(vmid=vmid, node=node, **kwargs)
+ else:
+ self.module.exit_json(changed=False, msg="Container config is already up to date")
def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
# Version limited features
minimum_version = {
- 'tags': 7,
+ 'tags': '6.1',
+ 'timezone': '6.3'
}
proxmox_node = self.proxmox_api.nodes(node)
# Remove all empty kwarg entries
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
- version = self.version()
- pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
+ pve_version = self.version()
# Fail on unsupported features
for option, version in minimum_version.items():
- if pve_major_version < version and option in kwargs:
- self.module.fail_json(changed=False, msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_major_version}".
- format(option=option, version=version, pve_major_version=pve_major_version))
+ if pve_version < LooseVersion(version) and option in kwargs:
+ self.module.fail_json(changed=False, msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}".
+ format(option=option, version=version, pve_version=pve_version))
if VZ_TYPE == 'lxc':
kwargs['cpulimit'] = cpus
@@ -474,6 +640,9 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
self.module.fail_json(msg='%s is not a valid tag' % tag)
kwargs['tags'] = ",".join(kwargs['tags'])
+ if kwargs.get('ostype') == 'auto':
+ kwargs.pop('ostype')
+
if clone is not None:
if VZ_TYPE != 'lxc':
self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
@@ -523,7 +692,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
return True
timeout -= 1
if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
+ self.module.fail_json(vmid=vmid, node=node, msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
proxmox_node.tasks(taskid).log.get()[:1])
time.sleep(1)
@@ -536,7 +705,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
return True
timeout -= 1
if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
+ self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
@@ -552,12 +721,19 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
return True
timeout -= 1
if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
+ self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
+ def convert_to_template(self, vm, vmid, timeout, force):
+ if getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running' and force:
+ self.stop_instance(vm, vmid, timeout, force)
+ # not sure why, but templating a container doesn't return a taskid
+ getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).template.post()
+ return True
+
def umount_instance(self, vm, vmid, timeout):
taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
while timeout:
@@ -565,7 +741,7 @@ class ProxmoxLxcAnsible(ProxmoxAnsible):
return True
timeout -= 1
if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
+ self.module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
@@ -589,21 +765,28 @@ def main():
netif=dict(type='dict'),
mounts=dict(type='dict'),
ip_address=dict(),
+ ostype=dict(default='auto', choices=[
+ 'auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos', 'unmanaged'
+ ]),
onboot=dict(type='bool'),
features=dict(type='list', elements='str'),
+ startup=dict(type='list', elements='str'),
storage=dict(default='local'),
cpuunits=dict(type='int'),
nameserver=dict(),
searchdomain=dict(),
timeout=dict(type='int', default=30),
+ update=dict(type='bool', default=False),
force=dict(type='bool', default=False),
purge=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'template']),
pubkey=dict(type='str'),
- unprivileged=dict(type='bool'),
+ unprivileged=dict(type='bool', default=True),
description=dict(type='str'),
hookscript=dict(type='str'),
- proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
+ timezone=dict(type='str'),
+ proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults'],
+ removed_in_version='9.0.0', removed_from_collection='community.general'),
clone=dict(type='int'),
clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
tags=dict(type='list', elements='str')
@@ -614,14 +797,15 @@ def main():
argument_spec=module_args,
required_if=[
('state', 'present', ['node', 'hostname']),
- ('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
- # either clone a container or create a new one from a template file.
+ # Require one of clone, ostemplate, or update. Together with mutually_exclusive this ensures that we
+ # either clone a container or create a new one from a template file.
+ ('state', 'present', ('clone', 'ostemplate', 'update'), True),
],
required_together=[
('api_token_id', 'api_token_secret')
],
required_one_of=[('api_password', 'api_token_id')],
- mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template.
+ mutually_exclusive=[('clone', 'ostemplate', 'update')],  # A new container is created either by cloning an existing one or from a template; updating an existing one is exclusive with both.
)
proxmox = ProxmoxLxcAnsible(module)
@@ -643,14 +827,6 @@ def main():
timeout = module.params['timeout']
clone = module.params['clone']
- if module.params['unprivileged'] is None:
- module.params['unprivileged'] = False
- module.deprecate(
- 'The default value `false` for the parameter "unprivileged" is deprecated and it will be replaced with `true`',
- version='7.0.0',
- collection_name='community.general'
- )
-
if module.params['proxmox_default_behavior'] == 'compatibility':
old_default_values = dict(
disk="3",
@@ -677,21 +853,59 @@ def main():
# Create a new container
if state == 'present' and clone is None:
try:
- if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
- module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
+ if proxmox.get_vm(vmid, ignore_missing=True):
+ if module.params["update"]:
+ try:
+ proxmox.update_config(vmid, node, disk, cpus, memory, swap,
+ cores=module.params["cores"],
+ hostname=module.params["hostname"],
+ netif=module.params["netif"],
+ mounts=module.params["mounts"],
+ ip_address=module.params["ip_address"],
+ onboot=ansible_to_proxmox_bool(module.params["onboot"]),
+ cpuunits=module.params["cpuunits"],
+ nameserver=module.params["nameserver"],
+ searchdomain=module.params["searchdomain"],
+ features=",".join(module.params["features"])
+ if module.params["features"] is not None
+ else None,
+ startup=",".join(module.params["startup"])
+ if module.params["startup"] is not None
+ else None,
+ description=module.params["description"],
+ hookscript=module.params["hookscript"],
+ timezone=module.params["timezone"],
+ tags=module.params["tags"])
+ module.exit_json(
+ changed=True,
+ vmid=vmid,
+ msg="Configured VM %s" % (vmid),
+ )
+ except Exception as e:
+ module.fail_json(
+ vmid=vmid,
+ msg="Configuration of %s VM %s failed with exception: %s"
+ % (VZ_TYPE, vmid, e),
+ )
+ if not module.params["force"]:
+ module.exit_json(
+ changed=False,
+ vmid=vmid,
+ msg="VM with vmid = %s already exists" % vmid,
+ )
# If no vmid was passed, there cannot be another VM named 'hostname'
if (not module.params['vmid'] and
proxmox.get_vmid(hostname, ignore_missing=True) and
not module.params['force']):
vmid = proxmox.get_vmid(hostname)
- module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+ module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
elif not proxmox.get_node(node):
- module.fail_json(msg="node '%s' not exists in cluster" % node)
+ module.fail_json(vmid=vmid, msg="node '%s' does not exist in cluster" % node)
elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
- module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
+ module.fail_json(vmid=vmid, msg="ostemplate '%s' does not exist on node %s and storage %s"
% (module.params['ostemplate'], node, template_store))
except Exception as e:
- module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+ module.fail_json(vmid=vmid, msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
try:
proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
@@ -702,6 +916,7 @@ def main():
ostemplate=module.params['ostemplate'],
netif=module.params['netif'],
mounts=module.params['mounts'],
+ ostype=module.params['ostype'],
ip_address=module.params['ip_address'],
onboot=ansible_to_proxmox_bool(module.params['onboot']),
cpuunits=module.params['cpuunits'],
@@ -710,48 +925,50 @@ def main():
force=ansible_to_proxmox_bool(module.params['force']),
pubkey=module.params['pubkey'],
features=",".join(module.params['features']) if module.params['features'] is not None else None,
+ startup=",".join(module.params['startup']) if module.params['startup'] is not None else None,
unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
description=module.params['description'],
hookscript=module.params['hookscript'],
+ timezone=module.params['timezone'],
tags=module.params['tags'])
- module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ module.exit_json(changed=True, vmid=vmid, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
except Exception as e:
- module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+ module.fail_json(vmid=vmid, msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
# Clone a container
elif state == 'present' and clone is not None:
try:
if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
- module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
+ module.exit_json(changed=False, vmid=vmid, msg="VM with vmid = %s already exists" % vmid)
# If no vmid was passed, there cannot be another VM named 'hostname'
if (not module.params['vmid'] and
proxmox.get_vmid(hostname, ignore_missing=True) and
not module.params['force']):
vmid = proxmox.get_vmid(hostname)
- module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+ module.exit_json(changed=False, vmid=vmid, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
if not proxmox.get_vm(clone, ignore_missing=True):
- module.exit_json(changed=False, msg="Container to be cloned does not exist")
+ module.exit_json(changed=False, vmid=vmid, msg="Container to be cloned does not exist")
except Exception as e:
- module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+ module.fail_json(vmid=vmid, msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
try:
proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
- module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
+ module.exit_json(changed=True, vmid=vmid, msg="Cloned VM %s from %s" % (vmid, clone))
except Exception as e:
- module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+ module.fail_json(vmid=vmid, msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
elif state == 'started':
try:
vm = proxmox.get_vm(vmid)
if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
- module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid)
if proxmox.start_instance(vm, vmid, timeout):
- module.exit_json(changed=True, msg="VM %s started" % vmid)
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid)
except Exception as e:
- module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+ module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e))
elif state == 'stopped':
try:
@@ -760,18 +977,27 @@ def main():
if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
if module.params['force']:
if proxmox.umount_instance(vm, vmid, timeout):
- module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
else:
- module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
- "You can use force option to umount it.") % vmid)
+ module.exit_json(changed=False, vmid=vmid,
+ msg=("VM %s is already shutdown, but mounted. You can use force option to umount it.") % vmid)
if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
- module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is already shutdown" % vmid)
if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']):
- module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'template':
+ try:
+ vm = proxmox.get_vm(vmid)
+
+ proxmox.convert_to_template(vm, vmid, timeout, force=module.params['force'])
+ module.exit_json(changed=True, msg="VM %s is converted to template" % vmid)
except Exception as e:
- module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+ module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e))
elif state == 'restarted':
try:
@@ -779,28 +1005,28 @@ def main():
vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
if vm_status in ['stopped', 'mounted']:
- module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid)
if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and
proxmox.start_instance(vm, vmid, timeout)):
- module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid)
except Exception as e:
- module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+ module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
elif state == 'absent':
if not vmid:
- module.exit_json(changed=False, msg='VM with hostname = %s is already absent' % hostname)
+ module.exit_json(changed=False, vmid=vmid, msg='VM with hostname = %s is already absent' % hostname)
try:
vm = proxmox.get_vm(vmid, ignore_missing=True)
if not vm:
- module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s does not exist" % vmid)
vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
if vm_status == 'running':
- module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion." % vmid)
if vm_status == 'mounted':
- module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
delete_params = {}
@@ -811,15 +1037,15 @@ def main():
while timeout:
if proxmox.api_task_ok(vm['node'], taskid):
- module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ module.exit_json(changed=True, vmid=vmid, taskid=taskid, msg="VM %s removed" % vmid)
timeout -= 1
if timeout == 0:
- module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
+ module.fail_json(vmid=vmid, taskid=taskid, msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
% proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
except Exception as e:
- module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
+ module.fail_json(vmid=vmid, msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
if __name__ == '__main__':
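
The new template state above delegates to the module's convert_to_template helper, whose body lies outside this hunk. Based on the Proxmox VE API, converting a container to a template amounts to a single POST; a minimal proxmoxer sketch (host, node, and vmid below are placeholders):

    from proxmoxer import ProxmoxAPI

    api = ProxmoxAPI('pve1.example.com', user='root@pam', password='secret', verify_ssl=False)
    # POST /nodes/{node}/lxc/{vmid}/template marks the container as a template.
    api.nodes('pve1').lxc(100).template.post()
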
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_disk.py b/ansible_collections/community/general/plugins/modules/proxmox_disk.py
index df6735cc0..69a7300df 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_disk.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_disk.py
@@ -25,17 +25,17 @@ options:
name:
description:
- The unique name of the VM.
- - You can specify either I(name) or I(vmid) or both of them.
+ - You can specify either O(name) or O(vmid) or both of them.
type: str
vmid:
description:
- The unique ID of the VM.
- - You can specify either I(vmid) or I(name) or both of them.
+ - You can specify either O(vmid) or O(name) or both of them.
type: int
disk:
description:
- - The disk key (C(unused[n]), C(ide[n]), C(sata[n]), C(scsi[n]) or C(virtio[n])) you want to operate on.
- - Disk buses (IDE, SATA and so on) have fixed ranges of C(n) that accepted by Proxmox API.
+ - The disk key (V(unused[n]), V(ide[n]), V(sata[n]), V(scsi[n]) or V(virtio[n])) you want to operate on.
+      - Disk buses (IDE, SATA and so on) have fixed ranges of V(n) that are accepted by the Proxmox API.
- >
For IDE: 0-3;
for SCSI: 0-30;
@@ -48,79 +48,79 @@ options:
description:
- Indicates desired state of the disk.
- >
- I(state=present) can be used to create, replace disk or update options in existing disk. It will create missing
- disk or update options in existing one by default. See the I(create) parameter description to control behavior
+        O(state=present) can be used to create or replace a disk, or to update options in an existing disk. It will create a missing
+        disk or update options in an existing one by default. See the O(create) parameter description to control the behavior
of this option.
- - Some updates on options (like I(cache)) are not being applied instantly and require VM restart.
+      - Some updates on options (like O(cache)) are not applied instantly and require a VM restart.
- >
- Use I(state=detached) to detach existing disk from VM but do not remove it entirely.
- When I(state=detached) and disk is C(unused[n]) it will be left in same state (not removed).
+        Use O(state=detached) to detach an existing disk from the VM without removing it entirely.
+        When O(state=detached) and the disk is V(unused[n]), it is left in the same state (not removed).
- >
- I(state=moved) may be used to change backing storage for the disk in bounds of the same VM
+        O(state=moved) may be used to change the backing storage for the disk within the same VM
or to send the disk to another VM (using the same backing storage).
- >
- I(state=resized) intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size
+        O(state=resized) is intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size
because shrinking disks is not supported by the PVE API and has to be done manually.
- - To entirely remove the disk from backing storage use I(state=absent).
+ - To entirely remove the disk from backing storage use O(state=absent).
type: str
choices: ['present', 'resized', 'detached', 'moved', 'absent']
default: present
create:
description:
- - With I(create) flag you can control behavior of I(state=present).
- - When I(create=disabled) it will not create new disk (if not exists) but will update options in existing disk.
- - When I(create=regular) it will either create new disk (if not exists) or update options in existing disk.
- - When I(create=forced) it will always create new disk (if disk exists it will be detached and left unused).
+      - With the O(create) flag you can control the behavior of O(state=present).
+      - When O(create=disabled) it will not create a new disk (if it does not exist) but will update options in an existing disk.
+      - When O(create=regular) it will either create a new disk (if it does not exist) or update options in an existing disk.
+      - When O(create=forced) it will always create a new disk (if the disk exists it will be detached and left unused).
type: str
choices: ['disabled', 'regular', 'forced']
default: regular
storage:
description:
- The drive's backing storage.
- - Used only when I(state) is C(present).
+ - Used only when O(state) is V(present).
type: str
size:
description:
- - Desired volume size in GB to allocate when I(state=present) (specify I(size) without suffix).
+ - Desired volume size in GB to allocate when O(state=present) (specify O(size) without suffix).
- >
- New (or additional) size of volume when I(state=resized). With the C(+) sign
+ New (or additional) size of volume when O(state=resized). With the V(+) sign
the value is added to the actual size of the volume
and without it, the value is taken as an absolute one.
type: str
bwlimit:
description:
- Override I/O bandwidth limit (in KB/s).
- - Used only when I(state=moved).
+ - Used only when O(state=moved).
type: int
delete_moved:
description:
- Delete the original disk after successful copy.
- By default the original disk is kept as unused disk.
- - Used only when I(state=moved).
+ - Used only when O(state=moved).
type: bool
target_disk:
description:
- - The config key the disk will be moved to on the target VM (for example, C(ide0) or C(scsi1)).
+ - The config key the disk will be moved to on the target VM (for example, V(ide0) or V(scsi1)).
- Default is the source disk key.
- - Used only when I(state=moved).
+ - Used only when O(state=moved).
type: str
target_storage:
description:
- - Move the disk to this storage when I(state=moved).
+ - Move the disk to this storage when O(state=moved).
- You can move between storages only in scope of one VM.
- - Mutually exclusive with I(target_vmid).
- - Consider increasing I(timeout) in case of large disk images or slow storage backend.
+ - Mutually exclusive with O(target_vmid).
+ - Consider increasing O(timeout) in case of large disk images or slow storage backend.
type: str
target_vmid:
description:
- - The (unique) ID of the VM where disk will be placed when I(state=moved).
+ - The (unique) ID of the VM where disk will be placed when O(state=moved).
- You can move disk between VMs only when the same storage is used.
- - Mutually exclusive with I(target_vmid).
+      - Mutually exclusive with O(target_storage).
type: int
timeout:
description:
- Timeout in seconds to wait for slow operations such as importing disk or moving disk between storages.
- - Used only when I(state) is C(present) or C(moved).
+ - Used only when O(state) is V(present) or V(moved).
type: int
default: 600
aio:
@@ -177,13 +177,13 @@ options:
- Volume string format
- C(<STORAGE>:<VMID>/<FULL_NAME>) or C(<ABSOLUTE_PATH>/<FULL_NAME>)
- Attention! Only root can use absolute paths.
- - This parameter is mutually exclusive with I(size).
- - Increase I(timeout) parameter when importing large disk images or using slow storage.
+ - This parameter is mutually exclusive with O(size).
+ - Increase O(timeout) parameter when importing large disk images or using slow storage.
type: str
iops:
description:
- Maximum total r/w I/O in operations per second.
- - You can specify either total limit or per operation (mutually exclusive with I(iops_rd) and I(iops_wr)).
+ - You can specify either total limit or per operation (mutually exclusive with O(iops_rd) and O(iops_wr)).
type: int
iops_max:
description:
@@ -196,7 +196,7 @@ options:
iops_rd:
description:
- Maximum read I/O in operations per second.
- - You can specify either read or total limit (mutually exclusive with I(iops)).
+ - You can specify either read or total limit (mutually exclusive with O(iops)).
type: int
iops_rd_max:
description:
@@ -209,7 +209,7 @@ options:
iops_wr:
description:
- Maximum write I/O in operations per second.
- - You can specify either write or total limit (mutually exclusive with I(iops)).
+ - You can specify either write or total limit (mutually exclusive with O(iops)).
type: int
iops_wr_max:
description:
@@ -227,7 +227,7 @@ options:
description:
- Maximum total r/w speed in megabytes per second.
- Can be fractional but use with caution - fractionals less than 1 are not supported officially.
- - You can specify either total limit or per operation (mutually exclusive with I(mbps_rd) and I(mbps_wr)).
+ - You can specify either total limit or per operation (mutually exclusive with O(mbps_rd) and O(mbps_wr)).
type: float
mbps_max:
description:
@@ -236,7 +236,7 @@ options:
mbps_rd:
description:
- Maximum read speed in megabytes per second.
- - You can specify either read or total limit (mutually exclusive with I(mbps)).
+ - You can specify either read or total limit (mutually exclusive with O(mbps)).
type: float
mbps_rd_max:
description:
@@ -245,7 +245,7 @@ options:
mbps_wr:
description:
- Maximum write speed in megabytes per second.
- - You can specify either write or total limit (mutually exclusive with I(mbps)).
+ - You can specify either write or total limit (mutually exclusive with O(mbps)).
type: float
mbps_wr_max:
description:
@@ -256,6 +256,16 @@ options:
- The drive's media type.
type: str
choices: ['cdrom', 'disk']
+ iso_image:
+ description:
+      - The ISO image to be mounted on the CD-ROM specified in O(disk).
+ - O(media=cdrom) needs to be specified for this option to work.
+ - "Image string format:"
+ - V(<STORAGE>:iso/<ISO_NAME>) to mount ISO.
+ - V(cdrom) to use physical CD/DVD drive.
+      - V(none) to unmount the image from an existing CD-ROM or to create an empty CD-ROM drive.
+ type: str
+ version_added: 8.1.0
queues:
description:
- Number of queues (SCSI only).
@@ -312,7 +322,7 @@ options:
choices: ['enospc', 'ignore', 'report', 'stop']
wwn:
description:
- - The drive's worldwide name, encoded as 16 bytes hex string, prefixed by C(0x).
+ - The drive's worldwide name, encoded as 16 bytes hex string, prefixed by V(0x).
type: str
extends_documentation_fragment:
- community.general.proxmox.documentation
@@ -412,6 +422,18 @@ EXAMPLES = '''
vmid: 101
disk: scsi4
state: absent
+
+- name: Mount ISO image on CD-ROM (create drive if missing)
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_token_id: token1
+ api_token_secret: some-token-data
+ vmid: 101
+ disk: ide2
+ media: cdrom
+ iso_image: local:iso/favorite_distro_amd64.iso
+ state: present
'''
RETURN = '''
@@ -435,17 +457,41 @@ from time import sleep
def disk_conf_str_to_dict(config_string):
+ """
+    Transform a Proxmox disk configuration string into a dictionary which has the
+    volume option parsed in '{ storage }:{ volume }' format and the other options parsed
+    in '{ option }={ value }' format. This dictionary is compared afterwards with the
+    attributes that the user passed to this module in the playbook.\n
+ config_string examples:
+ - local-lvm:vm-100-disk-0,ssd=1,discard=on,size=25G
+ - local:iso/new-vm-ignition.iso,media=cdrom,size=70k
+ - none,media=cdrom
+    :param config_string: Configuration string retrieved from the Proxmox API
+ :return: Dictionary with volume option divided into parts ('volume_name', 'storage_name', 'volume') \n
+ and other options as key:value.
+ """
config = config_string.split(',')
- storage_volume = config.pop(0).split(':')
- config.sort()
- storage_name = storage_volume[0]
- volume_name = storage_volume[1]
- config_current = dict(
- volume='%s:%s' % (storage_name, volume_name),
- storage_name=storage_name,
- volume_name=volume_name
- )
+    # When an empty CD-ROM drive is present, the volume part of the config string is "none".
+ storage_volume = config.pop(0)
+ if storage_volume in ["none", "cdrom"]:
+ config_current = dict(
+ volume=storage_volume,
+ storage_name=None,
+ volume_name=None,
+ size=None,
+ )
+ else:
+ storage_volume = storage_volume.split(':')
+ storage_name = storage_volume[0]
+ volume_name = storage_volume[1]
+ config_current = dict(
+ volume='%s:%s' % (storage_name, volume_name),
+ storage_name=storage_name,
+ volume_name=volume_name,
+ )
+
+ config.sort()
for option in config:
k, v = option.split('=')
config_current[k] = v
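
A hand-traced sketch of what the parser above is expected to return for the example strings from its docstring (values derived by reading the code, not captured output):

    # Regular disk image: volume split into storage and volume name, options sorted and appended.
    disk_conf_str_to_dict("local-lvm:vm-100-disk-0,ssd=1,discard=on,size=25G")
    # -> {'volume': 'local-lvm:vm-100-disk-0', 'storage_name': 'local-lvm',
    #     'volume_name': 'vm-100-disk-0', 'discard': 'on', 'size': '25G', 'ssd': '1'}

    # Empty CD-ROM drive: the volume part is the literal "none", so the storage fields stay None.
    disk_conf_str_to_dict("none,media=cdrom")
    # -> {'volume': 'none', 'storage_name': None, 'volume_name': None,
    #     'size': None, 'media': 'cdrom'}
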
@@ -497,41 +543,61 @@ class ProxmoxDiskAnsible(ProxmoxAnsible):
if (create == 'regular' and disk not in vm_config) or (create == 'forced'):
# CREATE
- attributes = self.get_create_attributes()
- import_string = attributes.pop('import_from', None)
+ playbook_config = self.get_create_attributes()
+ import_string = playbook_config.pop('import_from', None)
+ iso_image = self.module.params.get('iso_image', None)
if import_string:
+ # When 'import_from' option is present in task options.
config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string)
timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s"
ok_str = "Disk %s imported into VM %s"
+ elif iso_image is not None:
+ # disk=<busN>, media=cdrom, iso_image=<ISO_NAME>
+ config_str = iso_image
+ ok_str = "CD-ROM was created on %s bus in VM %s"
else:
- config_str = "%s:%s" % (self.module.params["storage"], self.module.params["size"])
+ config_str = self.module.params["storage"]
+ if self.module.params.get("media") != "cdrom":
+ config_str += ":%s" % (self.module.params["size"])
ok_str = "Disk %s created in VM %s"
timeout_str = "Reached timeout while creating VM disk. Last line in task before timeout: %s"
- for k, v in attributes.items():
+ for k, v in playbook_config.items():
config_str += ',%s=%s' % (k, v)
disk_config_to_apply = {self.module.params["disk"]: config_str}
if create in ['disabled', 'regular'] and disk in vm_config:
# UPDATE
- disk_config = disk_conf_str_to_dict(vm_config[disk])
- config_str = disk_config["volume"]
ok_str = "Disk %s updated in VM %s"
- attributes = self.get_create_attributes()
+ iso_image = self.module.params.get('iso_image', None)
+
+ proxmox_config = disk_conf_str_to_dict(vm_config[disk])
# 'import_from' fails on disk updates
- attributes.pop('import_from', None)
+ playbook_config = self.get_create_attributes()
+ playbook_config.pop('import_from', None)
- for k, v in attributes.items():
+ # Begin composing configuration string
+ if iso_image is not None:
+ config_str = iso_image
+ else:
+ config_str = proxmox_config["volume"]
+ # Append all mandatory fields from playbook_config
+ for k, v in playbook_config.items():
config_str += ',%s=%s' % (k, v)
- # Now compare old and new config to detect if changes are needed
+            # Append to playbook_config the fields which are constant for disk images
for option in ['size', 'storage_name', 'volume', 'volume_name']:
- attributes.update({option: disk_config[option]})
+ playbook_config.update({option: proxmox_config[option]})
+            # CD-ROM is a special disk device and its disk image is subject to change
+ if iso_image is not None:
+ playbook_config['volume'] = iso_image
# Values in params are numbers, but strings are needed to compare with disk_config
- attributes = dict((k, str(v)) for k, v in attributes.items())
- if disk_config == attributes:
+ playbook_config = dict((k, str(v)) for k, v in playbook_config.items())
+
+ # Now compare old and new config to detect if changes are needed
+ if proxmox_config == playbook_config:
return False, "Disk %s is up to date in VM %s" % (disk, vmid)
disk_config_to_apply = {self.module.params["disk"]: config_str}
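
To make the change detection above concrete, a hand-traced sketch follows. It assumes get_create_attributes() (defined outside this hunk) returns only the disk options set in the playbook, here a single new option:

    # Assumption: the disk is currently "local-lvm:vm-100-disk-0,size=32G" and the
    # playbook adds cache=writeback, so get_create_attributes() returns {'cache': 'writeback'}.
    proxmox_config = {'volume': 'local-lvm:vm-100-disk-0', 'storage_name': 'local-lvm',
                      'volume_name': 'vm-100-disk-0', 'size': '32G'}
    playbook_config = {'cache': 'writeback'}
    config_str = 'local-lvm:vm-100-disk-0,cache=writeback'  # existing volume kept, new option appended

    # After copying the constant fields from proxmox_config and stringifying:
    playbook_config = {'cache': 'writeback', 'size': '32G', 'storage_name': 'local-lvm',
                       'volume': 'local-lvm:vm-100-disk-0', 'volume_name': 'vm-100-disk-0'}
    # proxmox_config != playbook_config (no 'cache' key on the Proxmox side),
    # so the module applies config_str to the disk instead of reporting "up to date".
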
@@ -600,6 +666,7 @@ def main():
iops_wr_max=dict(type='int'),
iops_wr_max_length=dict(type='int'),
iothread=dict(type='bool'),
+ iso_image=dict(type='str'),
mbps=dict(type='float'),
mbps_max=dict(type='float'),
mbps_rd=dict(type='float'),
@@ -664,6 +731,7 @@ def main():
'iops_max_length': 'iops_max',
'iops_rd_max_length': 'iops_rd_max',
'iops_wr_max_length': 'iops_wr_max',
+ 'iso_image': 'media',
},
supports_check_mode=False,
mutually_exclusive=[
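
The 'iso_image': 'media' entry above appears to extend the module's required_by mapping (the dict is opened outside this hunk), so argument validation should reject iso_image without media. A hypothetical, trimmed-down sketch of that relationship:

    from ansible.module_utils.basic import AnsibleModule

    # Illustration only: reduced to the two parameters in question.
    module = AnsibleModule(
        argument_spec=dict(
            media=dict(type='str', choices=['cdrom', 'disk']),
            iso_image=dict(type='str'),
        ),
        # 'iso_image' cannot be used unless 'media' is also provided.
        required_by={'iso_image': 'media'},
    )
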
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
index 1dba5f4ea..8779dcdc1 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
@@ -14,7 +14,7 @@ module: proxmox_kvm
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster
description:
- Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
- - Since community.general 4.0.0 on, there are no more default values, see I(proxmox_default_behavior).
+ - Since community.general 4.0.0 on, there are no more default values, see O(proxmox_default_behavior).
author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
attributes:
check_mode:
@@ -30,31 +30,31 @@ options:
acpi:
description:
- Specify if ACPI should be enabled/disabled.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(true).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true).
type: bool
agent:
description:
- Specify if the QEMU Guest Agent should be enabled/disabled.
- Since community.general 5.5.0, this can also be a string instead of a boolean.
- This allows to specify values such as C(enabled=1,fstrim_cloned_disks=1).
+ This allows to specify values such as V(enabled=1,fstrim_cloned_disks=1).
type: str
args:
description:
- Pass arbitrary arguments to kvm.
- This option is for experts only!
- - If I(proxmox_default_behavior) is set to C(compatiblity), this option has a default of
- C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
+ - If O(proxmox_default_behavior) is set to V(compatibility), this option has a default of
+ V(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
type: str
autostart:
description:
- Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
type: bool
balloon:
description:
- Specify the amount of RAM for the VM in MB.
- Using zero disables the balloon driver.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(0).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(0).
type: int
bios:
description:
@@ -63,13 +63,14 @@ options:
choices: ['seabios', 'ovmf']
boot:
description:
- - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+ - Specify the boot order -> boot on floppy V(a), hard disk V(c), CD-ROM V(d), or network V(n).
+ - For newer versions of Proxmox VE, use a boot order like V(order=scsi0;net0;hostpci0).
- You can combine to set order.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(cnd).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(cnd).
type: str
bootdisk:
description:
- - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
+ - 'Enable booting from specified disk. Format V((ide|sata|scsi|virtio\)\\d+).'
type: str
cicustom:
description:
@@ -84,8 +85,8 @@ options:
citype:
description:
- 'cloud-init: Specifies the cloud-init configuration format.'
- - The default depends on the configured operating system type (C(ostype)).
- - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows.
+ - The default depends on the configured operating system type (V(ostype)).
+ - We use the V(nocloud) format for Linux, and V(configdrive2) for Windows.
type: str
choices: ['nocloud', 'configdrive2']
version_added: 1.3.0
@@ -96,17 +97,17 @@ options:
version_added: 1.3.0
clone:
description:
- - Name of VM to be cloned. If I(vmid) is set, I(clone) can take an arbitrary value but is required for initiating the clone.
+ - Name of VM to be cloned. If O(vmid) is set, O(clone) can take an arbitrary value but is required for initiating the clone.
type: str
cores:
description:
- Specify number of cores per socket.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1).
type: int
cpu:
description:
- Specify emulated CPU type.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(kvm64).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(kvm64).
type: str
cpulimit:
description:
@@ -117,7 +118,7 @@ options:
description:
- Specify CPU weight for a VM.
- You can disable fair-scheduler configuration by setting this to 0
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1000).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1000).
type: int
delete:
description:
@@ -136,24 +137,24 @@ options:
efidisk0:
description:
- Specify a hash/dictionary of EFI disk options.
- - Requires I(bios=ovmf) to be set to be able to use it.
+ - Requires O(bios=ovmf) to be set to be able to use it.
type: dict
suboptions:
storage:
description:
- - C(storage) is the storage identifier where to create the disk.
+ - V(storage) is the storage identifier where to create the disk.
type: str
format:
description:
- - C(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide,
+ - V(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide,
section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest
version, tables 3 to 14) to find out format supported by the provided storage backend.
type: str
efitype:
description:
- - C(efitype) indicates the size of the EFI disk.
- - C(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries.
- - C(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable
+ - V(efitype) indicates the size of the EFI disk.
+ - V(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries.
+ - V(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable
Secure Boot
type: str
choices:
@@ -161,27 +162,27 @@ options:
- 4m
pre_enrolled_keys:
description:
- - C(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled C(1) in the VM firmware
+ - V(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled V(1) in the VM firmware
upon creation or not (0).
- - If set to C(1), Secure Boot will also be enabled by default when the VM is created.
+ - If set to V(1), Secure Boot will also be enabled by default when the VM is created.
type: bool
version_added: 4.5.0
force:
description:
- Allow to force stop VM.
- - Can be used with states C(stopped), C(restarted) and C(absent).
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false).
+ - Can be used with states V(stopped), V(restarted), and V(absent).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
type: bool
format:
description:
- Target drive's backing file's data format.
- Used only with clone
- - Use I(format=unspecified) and I(full=false) for a linked clone.
+ - Use O(format=unspecified) and O(full=false) for a linked clone.
- Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see
U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format
supported by the provided storage backend.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(qcow2).
- If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(qcow2).
+ If O(proxmox_default_behavior) is set to V(no_defaults), not specifying this option is equivalent to setting it to V(unspecified).
type: str
choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
freeze:
@@ -195,22 +196,27 @@ options:
- Used only with clone
type: bool
default: true
+ hookscript:
+ description:
+      - Script that will be executed during various steps in the VM's lifetime.
+ type: str
+ version_added: 8.1.0
hostpci:
description:
- - Specify a hash/dictionary of map host pci devices into guest. C(hostpci='{"key":"value", "key":"value"}').
+      - Specify a hash/dictionary of host PCI devices to map into the guest. O(hostpci='{"key":"value", "key":"value"}').
- Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
- Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
- The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
- - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
- - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
- - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
+ - C(pcie=boolean) C(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) C(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
+ - C(x-vga=boolean) C(default=0) Enable vfio-vga device support.
- /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
type: dict
hotplug:
description:
- Selectively enable hotplug features.
- - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
- - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
+ - This is a comma separated list of hotplug features V(network), V(disk), V(cpu), V(memory), and V(usb).
+ - Value 0 disables hotplug completely and value 1 is an alias for the default V(network,disk,usb).
type: str
hugepages:
description:
@@ -219,7 +225,7 @@ options:
choices: ['any', '2', '1024']
ide:
description:
- - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
+ - A hash/dictionary of volume used as IDE hard disk or CD-ROM. O(ide='{"key":"value", "key":"value"}').
- Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
@@ -231,7 +237,7 @@ options:
ipconfig:
description:
- 'cloud-init: Set the IP configuration.'
- - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}').
+ - A hash/dictionary of network ip configurations. O(ipconfig='{"key":"value", "key":"value"}').
- Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
- Values allowed are - C("[gw=<GatewayIPv4>] [,gw6=<GatewayIPv6>] [,ip=<IPv4Format/CIDR>] [,ip6=<IPv6Format/CIDR>]").
- 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
@@ -248,7 +254,7 @@ options:
kvm:
description:
- Enable/disable KVM hardware virtualization.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(true).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true).
type: bool
localtime:
description:
@@ -263,13 +269,19 @@ options:
machine:
description:
- Specifies the Qemu machine type.
- - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
+ - 'Type => V((pc|pc(-i440fx\)?-\\d+\\.\\d+(\\.pxe\)?|q35|pc-q35-\\d+\\.\\d+(\\.pxe\)?\)).'
type: str
memory:
description:
- Memory size in MB for instance.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(512).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(512).
type: int
+ migrate:
+ description:
+ - Migrate the VM to O(node) if it is on another node.
+ type: bool
+ default: false
+ version_added: 7.0.0
migrate_downtime:
description:
- Sets maximum tolerated downtime (in seconds) for migrations.
@@ -281,8 +293,9 @@ options:
type: int
name:
description:
- - Specifies the VM name. Only used on the configuration web interface.
- - Required only for C(state=present).
+      - Specifies the VM name. The name may be non-unique across the cluster.
+      - Required only for O(state=present).
+      - With O(state=present), if O(vmid) is not provided and a VM with this name already exists in the cluster, no changes will be made.
type: str
nameservers:
description:
@@ -293,7 +306,7 @@ options:
version_added: 1.3.0
net:
description:
- - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
+ - A hash/dictionary of network interfaces for the VM. O(net='{"key":"value", "key":"value"}').
- Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
- Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
- Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
@@ -309,7 +322,7 @@ options:
type: int
numa:
description:
- - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
+ - A hash/dictionaries of NUMA topology. O(numa='{"key":"value", "key":"value"}').
- Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
- Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
- C(cpus) CPUs accessing this NUMA node.
@@ -324,18 +337,18 @@ options:
onboot:
description:
- Specifies whether a VM will be started during system bootup.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(true).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(true).
type: bool
ostype:
description:
- Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
- The l26 is Linux 2.6/3.X Kernel.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(l26).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(l26).
type: str
choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']
parallel:
description:
- - A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}').
+ - A hash/dictionary of map host parallel devices. O(parallel='{"key":"value", "key":"value"}').
- Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2.
- Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
type: dict
@@ -345,7 +358,7 @@ options:
type: bool
reboot:
description:
- - Allow reboot. If set to C(true), the VM exit on reboot.
+      - Allow reboot. If set to V(true), the VM exits on reboot.
type: bool
revert:
description:
@@ -353,7 +366,7 @@ options:
type: str
sata:
description:
- - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
+ - A hash/dictionary of volume used as sata hard disk or CD-ROM. O(sata='{"key":"value", "key":"value"}').
- Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
@@ -364,8 +377,8 @@ options:
type: dict
scsi:
description:
- - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
- - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 13.
+ - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. O(scsi='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
- C(size) is the size of the disk in GB.
@@ -387,9 +400,9 @@ options:
version_added: 1.3.0
serial:
description:
- - A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}').
+ - A hash/dictionary of serial device to create inside the VM. V('{"key":"value", "key":"value"}').
- Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
- - Values allowed are - C((/dev/.+|socket)).
+ - Values allowed are - V((/dev/.+|socket\)).
- /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
type: dict
shares:
@@ -407,6 +420,14 @@ options:
smbios:
description:
- Specifies SMBIOS type 1 fields.
+ - "Comma separated, Base64 encoded (optional) SMBIOS properties:"
+ - V([base64=<1|0>] [,family=<Base64 encoded string>])
+ - V([,manufacturer=<Base64 encoded string>])
+ - V([,product=<Base64 encoded string>])
+ - V([,serial=<Base64 encoded string>])
+ - V([,sku=<Base64 encoded string>])
+ - V([,uuid=<UUID>])
+ - V([,version=<Base64 encoded string>])
type: str
snapname:
description:
@@ -415,7 +436,7 @@ options:
sockets:
description:
- Sets the number of CPU sockets. (1 - N).
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(1).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(1).
type: int
sshkeys:
description:
@@ -425,20 +446,21 @@ options:
startdate:
description:
- Sets the initial date of the real time clock.
- - Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
+ - Valid format for date are V('now') or V('2016-09-25T16:01:21') or V('2016-09-25').
type: str
startup:
description:
- - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+ - Startup and shutdown behavior. V([[order=]\\d+] [,up=\\d+] [,down=\\d+]).
- Order is a non-negative number defining the general startup order.
- Shutdown in done with reverse ordering.
type: str
state:
description:
- Indicates desired state of the instance.
- - If C(current), the current state of the VM will be fetched. You can access it with C(results.status)
+ - If V(current), the current state of the VM will be fetched. You can access it with C(results.status)
+ - V(template) was added in community.general 8.1.0.
type: str
- choices: ['present', 'started', 'absent', 'stopped', 'restarted','current']
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current', 'template']
default: present
storage:
description:
@@ -447,12 +469,12 @@ options:
tablet:
description:
- Enables/disables the USB tablet device.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
type: bool
tags:
description:
- List of tags to apply to the VM instance.
- - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
+ - Tags must start with V([a-z0-9_]) followed by zero or more of the following characters V([a-z0-9_-+.]).
- Tags are only available in Proxmox 6+.
type: list
elements: str
@@ -469,21 +491,48 @@ options:
template:
description:
- Enables/disables the template.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(false).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(false).
type: bool
timeout:
description:
- Timeout for operations.
+      - When used with O(state=stopped), the option sets a graceful timeout for the VM stop operation, after which the VM will be forcefully stopped.
type: int
default: 30
+ tpmstate0:
+ description:
+ - A hash/dictionary of options for the Trusted Platform Module disk.
+ - A TPM state disk is required for Windows 11 installations.
+ suboptions:
+ storage:
+ description:
+ - O(tpmstate0.storage) is the storage identifier where to create the disk.
+ type: str
+ required: true
+ version:
+ description:
+ - The TPM version to use.
+ type: str
+ choices: ['1.2', '2.0']
+ default: '2.0'
+ type: dict
+ version_added: 7.1.0
update:
description:
- - If C(true), the VM will be updated with new value.
- - Cause of the operations of the API and security reasons, I have disabled the update of the following parameters
- - C(net, virtio, ide, sata, scsi). Per example updating C(net) update the MAC address and C(virtio) create always new disk...
- - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
+      - If V(true), the VM will be updated with the new values.
+      - Because of how the API operates and for security reasons, updating the following parameters is disabled:
+        O(net), O(virtio), O(ide), O(sata), O(scsi). For example, updating O(net) changes the MAC address, and O(virtio) always creates a new disk.
+        This safety restriction can be disabled by setting O(update_unsafe) to V(true).
+      - Updating O(pool) is disabled. It needs an additional API endpoint not covered by this module.
+ type: bool
+ default: false
+ update_unsafe:
+ description:
+      - If V(true), do not enforce limitations on parameters O(net), O(virtio), O(ide), O(sata), O(scsi), O(efidisk0), and O(tpmstate0).
+        Use this option with caution because an improper configuration might result in a permanent loss of data (for example, a recreated disk).
type: bool
default: false
+ version_added: 8.4.0
vcpus:
description:
- Sets number of hotplugged vcpus.
@@ -491,13 +540,13 @@ options:
vga:
description:
- Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatiblity); then the default is C(std).
+ - This option has no default unless O(proxmox_default_behavior) is set to V(compatibility); then the default is V(std).
type: str
choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
virtio:
description:
- - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
- - Keys allowed are - C(virto[n]) where 0 ≤ n ≤ 15.
+ - A hash/dictionary of volume used as VIRTIO hard disk. O(virtio='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
- C(size) is the size of the disk in GB.
@@ -514,18 +563,21 @@ options:
- As of community.general 4.0.0, various options no longer have default values.
These default values caused problems when users expected different behavior from Proxmox
by default or filled options which caused problems when set.
- - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
- are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
+ - The value V(compatibility) (default before community.general 4.0.0) will ensure that the default values
+ are used when the values are not explicitly specified by the user. The new default is V(no_defaults),
which makes sure these options have no defaults.
- - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
- I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
- I(tablet), I(template), I(vga), options.
+ - This affects the O(acpi), O(autostart), O(balloon), O(boot), O(cores), O(cpu),
+ O(cpuunits), O(force), O(format), O(kvm), O(memory), O(onboot), O(ostype), O(sockets),
+ O(tablet), O(template), and O(vga) options.
+ - This option is deprecated and will be removed in community.general 10.0.0.
type: str
default: no_defaults
choices:
- compatibility
- no_defaults
version_added: "1.3.0"
+seealso:
+ - module: community.general.proxmox_vm_info
extends_documentation_fragment:
- community.general.proxmox.documentation
- community.general.proxmox.selection
@@ -754,6 +806,25 @@ EXAMPLES = '''
node: sabrewulf
state: restarted
+- name: Convert VM to template
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: template
+
+- name: Convert VM to template (stop VM if running)
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: template
+ force: true
+
- name: Remove VM
community.general.proxmox_kvm:
api_user: root@pam
@@ -763,6 +834,15 @@ EXAMPLES = '''
node: sabrewulf
state: absent
+- name: Get VM current state
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: current
+
- name: Update VM configuration
community.general.proxmox_kvm:
api_user: root@pam
@@ -774,6 +854,20 @@ EXAMPLES = '''
memory: 16384
update: true
+- name: Update VM configuration (incl. unsafe options)
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ cores: 8
+ memory: 16384
+ net:
+ net0: virtio,bridge=vmbr1
+ update: true
+ update_unsafe: true
+
- name: Delete QEMU parameters
community.general.proxmox_kvm:
api_user: root@pam
@@ -791,6 +885,26 @@ EXAMPLES = '''
name: spynal
node: sabrewulf
revert: 'template,cpulimit'
+
+- name: Migrate VM on second node
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf-2
+ migrate: true
+
+- name: Add hookscript to existing VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ vmid: 999
+ node: sabrewulf
+ hookscript: local:snippets/hookscript.pl
+ update: true
+
'''
RETURN = '''
@@ -874,6 +988,9 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
def wait_for_task(self, node, taskid):
timeout = self.module.params['timeout']
+ if self.module.params['state'] == 'stopped':
+ # Increase task timeout in case of stopped state to be sure it waits longer than VM stop operation itself
+ timeout += 10
while timeout:
if self.api_task_ok(node, taskid):
@@ -886,12 +1003,12 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
time.sleep(1)
return False
- def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
+ def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, update_unsafe, **kwargs):
# Available only in PVE 4
only_v4 = ['force', 'protection', 'skiplock']
only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
- # valide clone parameters
+ # valid clone parameters
valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
clone_params = {}
# Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
@@ -923,21 +1040,24 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
kwargs['sshkeys'] = str(urlencoded_ssh_keys)
- # If update, don't update disk (virtio, efidisk0, ide, sata, scsi) and network interface
+ # If update, don't update disk (virtio, efidisk0, tpmstate0, ide, sata, scsi) and network interface, unless update_unsafe=True
# pool parameter not supported by qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
if update:
- if 'virtio' in kwargs:
- del kwargs['virtio']
- if 'sata' in kwargs:
- del kwargs['sata']
- if 'scsi' in kwargs:
- del kwargs['scsi']
- if 'ide' in kwargs:
- del kwargs['ide']
- if 'efidisk0' in kwargs:
- del kwargs['efidisk0']
- if 'net' in kwargs:
- del kwargs['net']
+ if update_unsafe is False:
+ if 'virtio' in kwargs:
+ del kwargs['virtio']
+ if 'sata' in kwargs:
+ del kwargs['sata']
+ if 'scsi' in kwargs:
+ del kwargs['scsi']
+ if 'ide' in kwargs:
+ del kwargs['ide']
+ if 'efidisk0' in kwargs:
+ del kwargs['efidisk0']
+ if 'tpmstate0' in kwargs:
+ del kwargs['tpmstate0']
+ if 'net' in kwargs:
+ del kwargs['net']
if 'force' in kwargs:
del kwargs['force']
if 'pool' in kwargs:
@@ -951,7 +1071,7 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
# Flatten efidisk0 option to a string so that it's a string which is what Proxmoxer and the API expect
if 'efidisk0' in kwargs:
efidisk0_str = ''
- # Regexp to catch underscores in keys name, to replace them after by hypens
+            # Regexp to catch underscores in key names, to replace them with hyphens afterwards
hyphen_re = re.compile(r'_')
# If present, the storage definition should be the first argument
if 'storage' in kwargs['efidisk0']:
@@ -963,6 +1083,13 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
if 'storage' != k])
kwargs['efidisk0'] = efidisk0_str
+ # Flatten tpmstate0 option to a string so that it's a string which is what Proxmoxer and the API expect
+ if 'tpmstate0' in kwargs:
+ kwargs['tpmstate0'] = '{storage}:1,version=v{version}'.format(
+ storage=kwargs['tpmstate0'].get('storage'),
+ version=kwargs['tpmstate0'].get('version')
+ )
+
# Convert all dict in kwargs to elements.
# For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
for k in list(kwargs.keys()):
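
The tpmstate0 flattening above produces the plain config string the Proxmox API expects; a quick sketch of the transformation with placeholder values:

    tpmstate0 = {'storage': 'local-lvm', 'version': '2.0'}  # module suboptions
    flattened = '{storage}:1,version=v{version}'.format(**tpmstate0)
    # flattened == 'local-lvm:1,version=v2.0'
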
@@ -1043,16 +1170,53 @@ class ProxmoxKvmAnsible(ProxmoxAnsible):
return False
return True
- def stop_vm(self, vm, force):
+ def stop_vm(self, vm, force, timeout):
vmid = vm['vmid']
proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
+ taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0), timeout=timeout)
if not self.wait_for_task(vm['node'], taskid):
self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
proxmox_node.tasks(taskid).log.get()[:1])
return False
return True
+ def restart_vm(self, vm, force, **status):
+ vmid = vm['vmid']
+ try:
+ proxmox_node = self.proxmox_api.nodes(vm['node'])
+ taskid = proxmox_node.qemu(vmid).status.reset.post() if force else proxmox_node.qemu(vmid).status.reboot.post()
+ if not self.wait_for_task(vm['node'], taskid):
+ self.module.fail_json(msg='Reached timeout while waiting for rebooting VM. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+ except Exception as e:
+ self.module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+ return False
+
+ def convert_to_template(self, vm, timeout, force):
+ vmid = vm['vmid']
+ try:
+ proxmox_node = self.proxmox_api.nodes(vm['node'])
+ if proxmox_node.qemu(vmid).status.current.get()['status'] == 'running' and force:
+ self.stop_instance(vm, vmid, timeout, force)
+            # not sure why, but templating a VM doesn't return a taskid
+ proxmox_node.qemu(vmid).template.post()
+ return True
+ except Exception as e:
+ self.module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e))
+ return False
+
+ def migrate_vm(self, vm, target_node):
+ vmid = vm['vmid']
+ proxmox_node = self.proxmox_api.nodes(vm['node'])
+ taskid = proxmox_node.qemu(vmid).migrate.post(vmid=vmid, node=vm['node'], target=target_node, online=1)
+ if not self.wait_for_task(vm['node'], taskid):
+ self.module.fail_json(msg='Reached timeout while waiting for migrating VM. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
def main():
module_args = proxmox_auth_argument_spec()
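
The restart_vm, convert_to_template, and migrate_vm helpers added above map onto Proxmox VE API endpoints through proxmoxer. A standalone sketch of the same calls (host, node, vmid, and credentials are placeholders, not values from the module):

    from proxmoxer import ProxmoxAPI

    api = ProxmoxAPI('pve1.example.com', user='root@pam', password='secret', verify_ssl=False)
    node = api.nodes('pve1')

    taskid = node.qemu(100).status.reboot.post()                   # restart_vm (graceful reboot)
    taskid = node.qemu(100).status.reset.post()                    # restart_vm with force=true
    node.qemu(100).template.post()                                 # convert_to_template (no taskid returned)
    taskid = node.qemu(100).migrate.post(target='pve2', online=1)  # migrate_vm (online migration)
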
@@ -1089,6 +1253,7 @@ def main():
format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
freeze=dict(type='bool'),
full=dict(type='bool', default=True),
+ hookscript=dict(type='str'),
hostpci=dict(type='dict'),
hotplug=dict(type='str'),
hugepages=dict(choices=['any', '2', '1024']),
@@ -1100,6 +1265,7 @@ def main():
lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
machine=dict(type='str'),
memory=dict(type='int'),
+ migrate=dict(type='bool', default=False),
migrate_downtime=dict(type='int'),
migrate_speed=dict(type='int'),
name=dict(type='str'),
@@ -1129,7 +1295,7 @@ def main():
sshkeys=dict(type='str', no_log=False),
startdate=dict(type='str'),
startup=dict(),
- state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current', 'template']),
storage=dict(type='str'),
tablet=dict(type='bool'),
tags=dict(type='list', elements='str'),
@@ -1137,13 +1303,23 @@ def main():
tdf=dict(type='bool'),
template=dict(type='bool'),
timeout=dict(type='int', default=30),
+ tpmstate0=dict(type='dict',
+ options=dict(
+ storage=dict(type='str', required=True),
+ version=dict(type='str', choices=['2.0', '1.2'], default='2.0')
+ )),
update=dict(type='bool', default=False),
+ update_unsafe=dict(type='bool', default=False),
vcpus=dict(type='int'),
vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
virtio=dict(type='dict'),
vmid=dict(type='int'),
watchdog=dict(),
- proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
+ proxmox_default_behavior=dict(type='str',
+ default='no_defaults',
+ choices=['compatibility', 'no_defaults'],
+ removed_from_collection='community.general',
+ removed_in_version='10.0.0'),
)
module_args.update(kvm_args)
@@ -1159,6 +1335,7 @@ def main():
cpu = module.params['cpu']
cores = module.params['cores']
delete = module.params['delete']
+ migrate = module.params['migrate']
memory = module.params['memory']
name = module.params['name']
newid = module.params['newid']
@@ -1167,6 +1344,7 @@ def main():
sockets = module.params['sockets']
state = module.params['state']
update = bool(module.params['update'])
+ update_unsafe = bool(module.params['update_unsafe'])
vmid = module.params['vmid']
validate_certs = module.params['validate_certs']
@@ -1200,11 +1378,15 @@ def main():
# If vmid is not defined then retrieve its value from the vm name,
# the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
if not vmid:
- if state == 'present' and not update and not clone and not delete and not revert:
- try:
- vmid = proxmox.get_nextvmid()
- except Exception:
- module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+ if state == 'present' and not update and not clone and not delete and not revert and not migrate:
+ existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
+ if existing_vmid:
+ vmid = existing_vmid
+ else:
+ try:
+ vmid = proxmox.get_nextvmid()
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
else:
clone_target = clone or name
vmid = proxmox.get_vmid(clone_target, ignore_missing=True)
@@ -1247,20 +1429,32 @@ def main():
except Exception as e:
module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e))
+ if migrate:
+ try:
+ vm = proxmox.get_vm(vmid)
+ vm_node = vm['node']
+ if node != vm_node:
+ proxmox.migrate_vm(vm, node)
+ module.exit_json(changed=True, vmid=vmid, msg="VM {0} has been migrated from {1} to {2}".format(vmid, vm_node, node))
+ else:
+ module.exit_json(changed=False, vmid=vmid, msg="VM {0} is already on {1}".format(vmid, node))
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg='Unable to migrate VM {0} from {1} to {2}: {3}'.format(vmid, vm_node, node, e))
+
if state == 'present':
+ if not (update or clone) and proxmox.get_vm(vmid, ignore_missing=True):
+ module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
+ elif not (update or clone or vmid) and proxmox.get_vmid(name, ignore_missing=True):
+ module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name)
+ elif not node:
+ module.fail_json(msg='node is mandatory for creating/updating VM')
+ elif update and not any([vmid, name]):
+ module.fail_json(msg='vmid or name is mandatory for updating VM')
+ elif not proxmox.get_node(node):
+ module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
try:
- if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone):
- module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
- elif proxmox.get_vmid(name, ignore_missing=True) and not (update or clone):
- module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name)
- elif not node:
- module.fail.json(msg='node is mandatory for creating/updating VM')
- elif update and not any([vmid, name]):
- module.fail_json(msg='vmid or name is mandatory for updating VM')
- elif not proxmox.get_node(node):
- module.fail_json(msg="node '%s' does not exist in cluster" % node)
-
- proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update,
+ proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update, update_unsafe,
archive=module.params['archive'],
acpi=module.params['acpi'],
agent=module.params['agent'],
@@ -1280,6 +1474,7 @@ def main():
efidisk0=module.params['efidisk0'],
force=module.params['force'],
freeze=module.params['freeze'],
+ hookscript=module.params['hookscript'],
hostpci=module.params['hostpci'],
hotplug=module.params['hotplug'],
hugepages=module.params['hugepages'],
@@ -1317,6 +1512,7 @@ def main():
target=module.params['target'],
tdf=module.params['tdf'],
template=module.params['template'],
+ tpmstate0=module.params['tpmstate0'],
vcpus=module.params['vcpus'],
vga=module.params['vga'],
virtio=module.params['virtio'],
@@ -1329,12 +1525,6 @@ def main():
sata=module.params['sata'],
scsi=module.params['scsi'],
virtio=module.params['virtio'])
- if update:
- module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
- elif clone is not None:
- module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
- else:
- module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
except Exception as e:
if update:
module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
@@ -1343,14 +1533,23 @@ def main():
else:
module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
+ if update:
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
+ elif clone is not None:
+ module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
+ else:
+ module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+
elif state == 'started':
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
status = {}
try:
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
vm = proxmox.get_vm(vmid)
- status['status'] = vm['status']
- if vm['status'] == 'running':
+ current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if current == 'running':
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status)
if proxmox.start_vm(vm):
@@ -1359,52 +1558,68 @@ def main():
module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status)
elif state == 'stopped':
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
status = {}
try:
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
vm = proxmox.get_vm(vmid)
-
- status['status'] = vm['status']
- if vm['status'] == 'stopped':
+ current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if current == 'stopped':
module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status)
- if proxmox.stop_vm(vm, force=module.params['force']):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status)
+ proxmox.stop_vm(vm, force=module.params['force'], timeout=module.params['timeout'])
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status)
except Exception as e:
module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status)
- elif state == 'restarted':
+ elif state == 'template':
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
status = {}
try:
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
vm = proxmox.get_vm(vmid)
- status['status'] = vm['status']
- if vm['status'] == 'stopped':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
- if proxmox.stop_vm(vm, force=module.params['force']) and proxmox.start_vm(vm):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
+ if vm['template'] == 1:
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is already a template" % vmid, **status)
+
+ if proxmox.convert_to_template(vm, force=module.params['force'], timeout=module.params['timeout']):
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is converting to template" % vmid, **status)
except Exception as e:
- module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status)
+ module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e), **status)
+
+ elif state == 'restarted':
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ status = {}
+ vm = proxmox.get_vm(vmid)
+ current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if current == 'stopped':
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
+
+ if proxmox.restart_vm(vm, force=module.params['force']):
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
elif state == 'absent':
status = {}
if not vmid:
module.exit_json(changed=False, msg='VM with name = %s is already absent' % name)
+
try:
vm = proxmox.get_vm(vmid, ignore_missing=True)
if not vm:
module.exit_json(changed=False, vmid=vmid)
proxmox_node = proxmox.proxmox_api.nodes(vm['node'])
- status['status'] = vm['status']
- if vm['status'] == 'running':
+ current = proxmox_node.qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if current == 'running':
if module.params['force']:
- proxmox.stop_vm(vm, True)
+ proxmox.stop_vm(vm, True, timeout=module.params['timeout'])
else:
module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=true." % vmid)
taskid = proxmox_node.qemu.delete(vmid)
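For orientation, the hunks above replace the cached 'status' field from the cluster-resources listing with a live query of /nodes/{node}/qemu/{vmid}/status/current. A minimal proxmoxer sketch of that lookup (not part of the patch; host, credentials, and vmid are placeholders):

    from proxmoxer import ProxmoxAPI  # assumed available, as the module requires

    api = ProxmoxAPI("proxmox1", user="root@pam", password="secret", verify_ssl=False)
    vmid = 100
    # The cluster resources listing can be slightly stale ...
    vm = next(r for r in api.cluster.resources.get(type="vm") if int(r["vmid"]) == vmid)
    # ... while status/current reflects the VM's power state right now.
    current = api.nodes(vm["node"]).qemu(vmid).status.current.get()["status"]
    print(current)  # 'running' or 'stopped'
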
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_nic.py b/ansible_collections/community/general/plugins/modules/proxmox_nic.py
index 26d07c7ec..9afe49447 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_nic.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_nic.py
@@ -24,7 +24,7 @@ attributes:
options:
bridge:
description:
- - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0).
+ - Add this interface to the specified bridge device. The Proxmox VE default bridge is called V(vmbr0).
type: str
firewall:
description:
@@ -33,7 +33,7 @@ options:
default: false
interface:
description:
- - Name of the interface, should be C(net[n]) where C(1 ≤ n ≤ 31).
+ - Name of the interface, should be V(net[n]) where C(1 ≤ n ≤ 31).
type: str
required: true
link_down:
@@ -43,7 +43,7 @@ options:
default: false
mac:
description:
- - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
+ - V(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
- When not specified this module will keep the MAC address the same when changing an existing interface.
type: str
model:
@@ -56,13 +56,13 @@ options:
mtu:
description:
- Force MTU, for C(virtio) model only, setting will be ignored otherwise.
- - Set to C(1) to use the bridge MTU.
+ - Set to V(1) to use the bridge MTU.
- Value should be C(1 ≤ n ≤ 65520).
type: int
name:
description:
- Specifies the VM name. Only used on the configuration web interface.
- - Required only for I(state=present).
+ - Required only for O(state=present).
type: str
queues:
description:
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_node_info.py b/ansible_collections/community/general/plugins/modules/proxmox_node_info.py
new file mode 100644
index 000000000..82ef7aa38
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_node_info.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright John Berninger (@jberning) <john.berninger at gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_node_info
+short_description: Retrieve information about one or more Proxmox VE nodes
+version_added: 8.2.0
+description:
+ - Retrieve information about one or more Proxmox VE nodes.
+author: John Berninger (@jwbernin)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+
+EXAMPLES = '''
+- name: List existing nodes
+ community.general.proxmox_node_info:
+ api_host: proxmox1
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_nodes
+'''
+
+
+RETURN = '''
+proxmox_nodes:
+ description: List of Proxmox VE nodes.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ cpu:
+ description: Current CPU usage in fractional shares of this host's total available CPU.
+ returned: on success
+ type: float
+ disk:
+ description: Current local disk usage of this host.
+ returned: on success
+ type: int
+ id:
+ description: Identity of the node.
+ returned: on success
+ type: str
+ level:
+ description: Support level. Can be blank if not under a paid support contract.
+ returned: on success
+ type: str
+ maxcpu:
+ description: Total number of available CPUs on this host.
+ returned: on success
+ type: int
+ maxdisk:
+ description: Size of local disk in bytes.
+ returned: on success
+ type: int
+ maxmem:
+ description: Memory size in bytes.
+ returned: on success
+ type: int
+ mem:
+ description: Used memory in bytes.
+ returned: on success
+ type: int
+ node:
+ description: Short hostname of this node.
+ returned: on success
+ type: str
+ ssl_fingerprint:
+ description: SSL fingerprint of the node certificate.
+ returned: on success
+ type: str
+ status:
+ description: Node status.
+ returned: on success
+ type: str
+ type:
+ description: Object type being returned.
+ returned: on success
+ type: str
+ uptime:
+ description: Node uptime in seconds.
+ returned: on success
+ type: int
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxNodeInfoAnsible(ProxmoxAnsible):
+ def get_nodes(self):
+ nodes = self.proxmox_api.nodes.get()
+ return nodes
+
+
+def proxmox_node_info_argument_spec():
+ return dict()
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ node_info_args = proxmox_node_info_argument_spec()
+ module_args.update(node_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True,
+ )
+ result = dict(
+ changed=False
+ )
+
+ proxmox = ProxmoxNodeInfoAnsible(module)
+
+ nodes = proxmox.get_nodes()
+ result['proxmox_nodes'] = nodes
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
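For reference, the listing this new module wraps is the plain /nodes endpoint; a hedged proxmoxer equivalent with placeholder host and credentials (illustrative only, not part of the patch):

    from proxmoxer import ProxmoxAPI

    api = ProxmoxAPI("proxmox1", user="root@pam", password="secret", verify_ssl=False)
    for node in api.nodes.get():
        # Each entry carries the fields documented in RETURN above (node, status, uptime, ...).
        print(node["node"], node["status"], node.get("uptime", 0))
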
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_pool.py b/ansible_collections/community/general/plugins/modules/proxmox_pool.py
new file mode 100644
index 000000000..704632070
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_pool.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2023, Sergei Antipov (UnderGreen) <greendayonfire@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: proxmox_pool
+short_description: Pool management for Proxmox VE cluster
+description:
+ - Create or delete a pool for Proxmox VE clusters.
+ - For pool members management please consult M(community.general.proxmox_pool_member) module.
+version_added: 7.1.0
+author: "Sergei Antipov (@UnderGreen) <greendayonfire@gmail.com>"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ poolid:
+ description:
+ - The pool ID.
+ type: str
+ aliases: [ "name" ]
+ required: true
+ state:
+ description:
+ - Indicate desired state of the pool.
+ - The pool must be empty prior deleting it with O(state=absent).
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ comment:
+ description:
+ - Specify the description for the pool.
+ - Parameter is ignored when pool already exists or O(state=absent).
+ type: str
+
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+"""
+
+EXAMPLES = """
+- name: Create new Proxmox VE pool
+ community.general.proxmox_pool:
+ api_host: node1
+ api_user: root@pam
+ api_password: password
+ poolid: test
+ comment: 'New pool'
+
+- name: Delete the Proxmox VE pool
+ community.general.proxmox_pool:
+ api_host: node1
+ api_user: root@pam
+ api_password: password
+ poolid: test
+ state: absent
+"""
+
+RETURN = """
+poolid:
+ description: The pool ID.
+ returned: success
+ type: str
+ sample: test
+msg:
+ description: A short message on what the module did.
+ returned: always
+ type: str
+ sample: "Pool test successfully created"
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxPoolAnsible(ProxmoxAnsible):
+
+ def is_pool_existing(self, poolid):
+ """Check whether pool already exist
+
+ :param poolid: str - name of the pool
+ :return: bool - does the pool exist?
+ """
+ try:
+ pools = self.proxmox_api.pools.get()
+ for pool in pools:
+ if pool['poolid'] == poolid:
+ return True
+ return False
+ except Exception as e:
+ self.module.fail_json(msg="Unable to retrieve pools: {0}".format(e))
+
+ def is_pool_empty(self, poolid):
+ """Check whether pool has members
+
+ :param poolid: str - name of the pool
+ :return: bool - is the pool empty?
+ """
+ return not self.get_pool(poolid)['members']
+
+ def create_pool(self, poolid, comment=None):
+ """Create Proxmox VE pool
+
+ :param poolid: str - name of the pool
+ :param comment: str, optional - Description of a pool
+ :return: None
+ """
+ if self.is_pool_existing(poolid):
+ self.module.exit_json(changed=False, poolid=poolid, msg="Pool {0} already exists".format(poolid))
+
+ if self.module.check_mode:
+ return
+
+ try:
+ self.proxmox_api.pools.post(poolid=poolid, comment=comment)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to create pool with ID {0}: {1}".format(poolid, e))
+
+ def delete_pool(self, poolid):
+ """Delete Proxmox VE pool
+
+ :param poolid: str - name of the pool
+ :return: None
+ """
+ if not self.is_pool_existing(poolid):
+ self.module.exit_json(changed=False, poolid=poolid, msg="Pool {0} doesn't exist".format(poolid))
+
+ if self.is_pool_empty(poolid):
+ if self.module.check_mode:
+ return
+
+ try:
+ self.proxmox_api.pools(poolid).delete()
+ except Exception as e:
+ self.module.fail_json(msg="Failed to delete pool with ID {0}: {1}".format(poolid, e))
+ else:
+ self.module.fail_json(msg="Can't delete pool {0} with members. Please remove members from pool first.".format(poolid))
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ pools_args = dict(
+ poolid=dict(type="str", aliases=["name"], required=True),
+ comment=dict(type="str"),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+
+ module_args.update(pools_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_together=[("api_token_id", "api_token_secret")],
+ required_one_of=[("api_password", "api_token_id")],
+ supports_check_mode=True
+ )
+
+ poolid = module.params["poolid"]
+ comment = module.params["comment"]
+ state = module.params["state"]
+
+ proxmox = ProxmoxPoolAnsible(module)
+
+ if state == "present":
+ proxmox.create_pool(poolid, comment)
+ module.exit_json(changed=True, poolid=poolid, msg="Pool {0} successfully created".format(poolid))
+ else:
+ proxmox.delete_pool(poolid)
+ module.exit_json(changed=True, poolid=poolid, msg="Pool {0} successfully deleted".format(poolid))
+
+
+if __name__ == "__main__":
+ main()
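A hedged sketch of the emptiness guard that delete_pool() builds on, done with proxmoxer directly; the pool name and credentials below are placeholders:

    from proxmoxer import ProxmoxAPI

    api = ProxmoxAPI("node1", user="root@pam", password="password", verify_ssl=False)
    poolid = "test"
    members = api.pools(poolid).get()["members"]
    if not members:
        api.pools(poolid).delete()  # same call the module issues for an empty pool
    else:
        print("Pool %s still has %d member(s); remove them first" % (poolid, len(members)))
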
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_pool_member.py b/ansible_collections/community/general/plugins/modules/proxmox_pool_member.py
new file mode 100644
index 000000000..7d6b24949
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_pool_member.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2023, Sergei Antipov (UnderGreen) <greendayonfire@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: proxmox_pool_member
+short_description: Add or delete members from Proxmox VE cluster pools
+description:
+ - Create or delete a pool member in Proxmox VE clusters.
+version_added: 7.1.0
+author: "Sergei Antipov (@UnderGreen) <greendayonfire@gmail.com>"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ poolid:
+ description:
+ - The pool ID.
+ type: str
+ aliases: [ "name" ]
+ required: true
+ member:
+ description:
+ - Specify the member name.
+ - For O(type=storage) it is a storage name.
+ - For O(type=vm) either vmid or vm name could be used.
+ type: str
+ required: true
+ type:
+ description:
+ - Member type to add/remove from the pool.
+ choices: ["vm", "storage"]
+ default: vm
+ type: str
+ state:
+ description:
+ - Indicate desired state of the pool member.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+"""
+
+EXAMPLES = """
+- name: Add new VM to Proxmox VE pool
+ community.general.proxmox_pool_member:
+ api_host: node1
+ api_user: root@pam
+ api_password: password
+ poolid: test
+ member: 101
+
+- name: Add new storage to Proxmox VE pool
+ community.general.proxmox_pool_member:
+ api_host: node1
+ api_user: root@pam
+ api_password: password
+ poolid: test
+ member: zfs-data
+ type: storage
+
+- name: Remove VM from the Proxmox VE pool using VM name
+ community.general.proxmox_pool_member:
+ api_host: node1
+ api_user: root@pam
+ api_password: password
+ poolid: test
+ member: pxe.home.arpa
+ state: absent
+
+- name: Remove storage from the Proxmox VE pool
+ community.general.proxmox_pool_member:
+ api_host: node1
+ api_user: root@pam
+ api_password: password
+ poolid: test
+ member: zfs-storage
+ type: storage
+ state: absent
+"""
+
+RETURN = """
+poolid:
+ description: The pool ID.
+ returned: success
+ type: str
+ sample: test
+member:
+ description: Member name.
+ returned: success
+ type: str
+ sample: 101
+msg:
+ description: A short message on what the module did.
+ returned: always
+ type: str
+ sample: "Member 101 deleted from the pool test"
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxPoolMemberAnsible(ProxmoxAnsible):
+
+ def pool_members(self, poolid):
+ vms = []
+ storage = []
+ for member in self.get_pool(poolid)["members"]:
+ if member["type"] == "storage":
+ storage.append(member["storage"])
+ else:
+ vms.append(member["vmid"])
+
+ return (vms, storage)
+
+ def add_pool_member(self, poolid, member, member_type):
+ current_vms_members, current_storage_members = self.pool_members(poolid)
+ all_members_before = current_storage_members + current_vms_members
+ all_members_after = all_members_before.copy()
+ diff = {"before": {"members": all_members_before}, "after": {"members": all_members_after}}
+
+ try:
+ if member_type == "storage":
+ storages = self.get_storages(type=None)
+ if member not in [storage["storage"] for storage in storages]:
+ self.module.fail_json(msg="Storage {0} doesn't exist in the cluster".format(member))
+ if member in current_storage_members:
+ self.module.exit_json(changed=False, poolid=poolid, member=member,
+ diff=diff, msg="Member {0} is already part of the pool {1}".format(member, poolid))
+
+ all_members_after.append(member)
+ if self.module.check_mode:
+ return diff
+
+ self.proxmox_api.pools(poolid).put(storage=[member])
+ return diff
+ else:
+ try:
+ vmid = int(member)
+ except ValueError:
+ vmid = self.get_vmid(member)
+
+ if vmid in current_vms_members:
+ self.module.exit_json(changed=False, poolid=poolid, member=member,
+ diff=diff, msg="VM {0} is already part of the pool {1}".format(member, poolid))
+
+ all_members_after.append(member)
+
+ if not self.module.check_mode:
+ self.proxmox_api.pools(poolid).put(vms=[vmid])
+ return diff
+ except Exception as e:
+ self.module.fail_json(msg="Failed to add a new member ({0}) to the pool {1}: {2}".format(member, poolid, e))
+
+ def delete_pool_member(self, poolid, member, member_type):
+ current_vms_members, current_storage_members = self.pool_members(poolid)
+ all_members_before = current_storage_members + current_vms_members
+ all_members_after = all_members_before.copy()
+ diff = {"before": {"members": all_members_before}, "after": {"members": all_members_after}}
+
+ try:
+ if member_type == "storage":
+ if member not in current_storage_members:
+ self.module.exit_json(changed=False, poolid=poolid, member=member,
+ diff=diff, msg="Member {0} is not part of the pool {1}".format(member, poolid))
+
+ all_members_after.remove(member)
+ if self.module.check_mode:
+ return diff
+
+ self.proxmox_api.pools(poolid).put(storage=[member], delete=1)
+ return diff
+ else:
+ try:
+ vmid = int(member)
+ except ValueError:
+ vmid = self.get_vmid(member)
+
+ if vmid not in current_vms_members:
+ self.module.exit_json(changed=False, poolid=poolid, member=member,
+ diff=diff, msg="VM {0} is not part of the pool {1}".format(member, poolid))
+
+ all_members_after.remove(vmid)
+
+ if not self.module.check_mode:
+ self.proxmox_api.pools(poolid).put(vms=[vmid], delete=1)
+ return diff
+ except Exception as e:
+ self.module.fail_json(msg="Failed to delete a member ({0}) from the pool {1}: {2}".format(member, poolid, e))
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ pool_members_args = dict(
+ poolid=dict(type="str", aliases=["name"], required=True),
+ member=dict(type="str", required=True),
+ type=dict(default="vm", choices=["vm", "storage"]),
+ state=dict(default="present", choices=["present", "absent"]),
+ )
+
+ module_args.update(pool_members_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_together=[("api_token_id", "api_token_secret")],
+ required_one_of=[("api_password", "api_token_id")],
+ supports_check_mode=True
+ )
+
+ poolid = module.params["poolid"]
+ member = module.params["member"]
+ member_type = module.params["type"]
+ state = module.params["state"]
+
+ proxmox = ProxmoxPoolMemberAnsible(module)
+
+ if state == "present":
+ diff = proxmox.add_pool_member(poolid, member, member_type)
+ module.exit_json(changed=True, poolid=poolid, member=member, diff=diff, msg="New member {0} added to the pool {1}".format(member, poolid))
+ else:
+ diff = proxmox.delete_pool_member(poolid, member, member_type)
+ module.exit_json(changed=True, poolid=poolid, member=member, diff=diff, msg="Member {0} deleted from the pool {1}".format(member, poolid))
+
+
+if __name__ == "__main__":
+ main()
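The member handling above boils down to PUT /pools/{poolid} with a vms or storage list plus an optional delete flag; a minimal sketch with placeholder values, assuming proxmoxer:

    from proxmoxer import ProxmoxAPI

    api = ProxmoxAPI("node1", user="root@pam", password="password", verify_ssl=False)
    poolid, vmid = "test", 101
    current_vms = [m["vmid"] for m in api.pools(poolid).get()["members"] if m["type"] != "storage"]
    if vmid not in current_vms:
        api.pools(poolid).put(vms=[vmid])          # add the VM to the pool
    # api.pools(poolid).put(vms=[vmid], delete=1)  # and this would remove it again
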
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_snap.py b/ansible_collections/community/general/plugins/modules/proxmox_snap.py
index 0c17f8376..4991423c2 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_snap.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_snap.py
@@ -34,7 +34,7 @@ options:
state:
description:
- Indicate desired state of the instance snapshot.
- - The C(rollback) value was added in community.general 4.8.0.
+ - The V(rollback) value was added in community.general 4.8.0.
choices: ['present', 'absent', 'rollback']
default: present
type: str
@@ -49,7 +49,7 @@ options:
- Allows to snapshot a container even if it has configured mountpoints.
- Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration.
- If running, the container will be stopped and restarted to apply config changes.
- - Due to restrictions in the Proxmox API this option can only be used authenticating as C(root@pam) with I(api_password), API tokens do not work either.
+ - Due to restrictions in the Proxmox API this option can only be used authenticating as V(root@pam) with O(api_password), API tokens do not work either.
- See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details.
default: false
type: bool
@@ -74,10 +74,19 @@ options:
- Name of the snapshot that has to be created/deleted/restored.
default: 'ansible_snap'
type: str
+ retention:
+ description:
+ - Remove old snapshots if there are more than O(retention) snapshots.
+ - If O(retention) is set to V(0), all snapshots will be kept.
+ - This is only used when O(state=present) and when an actual snapshot is created.
+ If no snapshot is created, all existing snapshots will be kept.
+ default: 0
+ type: int
+ version_added: 7.1.0
notes:
- Requires proxmoxer and requests modules on host. These modules can be installed with pip.
-requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+requirements: [ "proxmoxer", "requests" ]
author: Jeffrey van Pelt (@Thulium-Drake)
extends_documentation_fragment:
- community.general.proxmox.documentation
@@ -94,6 +103,16 @@ EXAMPLES = r'''
state: present
snapname: pre-updates
+- name: Create new container snapshot and keep only the 2 newest snapshots
+ community.general.proxmox_snap:
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ vmid: 100
+ state: present
+ snapname: snapshot-42
+ retention: 2
+
- name: Create new snapshot for a container with configured mountpoints
community.general.proxmox_snap:
api_user: root@pam
@@ -190,7 +209,15 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
time.sleep(1)
return False
- def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind):
+ def snapshot_retention(self, vm, vmid, retention):
+ # ignore the last snapshot, which is the current state
+ snapshots = self.snapshot(vm, vmid).get()[:-1]
+ if retention > 0 and len(snapshots) > retention:
+ # sort by age, oldest first
+ for snap in sorted(snapshots, key=lambda x: x['snaptime'])[:len(snapshots) - retention]:
+ self.snapshot(vm, vmid)(snap['name']).delete()
+
+ def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind, retention):
if self.module.check_mode:
return True
@@ -217,9 +244,7 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
while timeout:
if self.api_task_ok(vm['node'], taskid):
- if vm['type'] == 'lxc' and unbind is True and mountpoints:
- self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
- return True
+ break
if timeout == 0:
self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' %
self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
@@ -228,7 +253,9 @@ class ProxmoxSnapAnsible(ProxmoxAnsible):
timeout -= 1
if vm['type'] == 'lxc' and unbind is True and mountpoints:
self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
- return False
+
+ self.snapshot_retention(vm, vmid, retention)
+ return timeout > 0
def snapshot_remove(self, vm, vmid, timeout, snapname, force):
if self.module.check_mode:
@@ -275,6 +302,7 @@ def main():
force=dict(type='bool', default=False),
unbind=dict(type='bool', default=False),
vmstate=dict(type='bool', default=False),
+ retention=dict(type='int', default=0),
)
module_args.update(snap_args)
@@ -294,6 +322,7 @@ def main():
force = module.params['force']
unbind = module.params['unbind']
vmstate = module.params['vmstate']
+ retention = module.params['retention']
# If hostname is set get the VM id from ProxmoxAPI
if not vmid and hostname:
@@ -309,7 +338,7 @@ def main():
if i['name'] == snapname:
module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
- if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind):
+ if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind, retention):
if module.check_mode:
module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
else:
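A standalone sketch of the retention rule snapshot_retention() applies above: once more than O(retention) snapshots exist, the oldest ones are dropped (the trailing 'current' pseudo-entry is already stripped by the [:-1] in the patch):

    def snapshots_to_delete(snapshots, retention):
        # snapshots: dicts with 'name' and 'snaptime', 'current' already removed
        if retention <= 0 or len(snapshots) <= retention:
            return []
        oldest_first = sorted(snapshots, key=lambda s: s["snaptime"])
        return [s["name"] for s in oldest_first[:len(snapshots) - retention]]

    print(snapshots_to_delete(
        [{"name": "a", "snaptime": 1}, {"name": "b", "snaptime": 2}, {"name": "c", "snaptime": 3}],
        retention=2))  # -> ['a']
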
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py b/ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py
new file mode 100644
index 000000000..498490fe4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_storage_contents_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Julian Vanden Broeck (@l00ptr) <julian.vandenbroeck at dalibo.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: proxmox_storage_contents_info
+short_description: List content from a Proxmox VE storage
+version_added: 8.2.0
+description:
+ - Retrieves information about stored objects on a specific storage attached to a node.
+options:
+ storage:
+ description:
+ - Only return content stored on that specific storage.
+ aliases: ['name']
+ type: str
+ required: true
+ node:
+ description:
+ - Proxmox node to which the storage is attached.
+ type: str
+ required: true
+ content:
+ description:
+ - Filter on a specific content type.
+ type: str
+ choices: ["all", "backup", "rootdir", "images", "iso"]
+ default: "all"
+ vmid:
+ description:
+ - Filter on a specific VMID.
+ type: int
+author: Julian Vanden Broeck (@l00ptr)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+"""
+
+
+EXAMPLES = """
+- name: List existing storages
+ community.general.proxmox_storage_contents_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ storage: lvm2
+ content: backup
+ vmid: 130
+"""
+
+
+RETURN = """
+proxmox_storage_content:
+ description: Content of the storage attached to a node.
+ type: list
+ returned: success
+ elements: dict
+ contains:
+ content:
+ description: Proxmox content of listed objects on this storage.
+ type: str
+ returned: success
+ ctime:
+ description: Creation time of the listed objects.
+ type: str
+ returned: success
+ format:
+ description: Format of the listed objects (can be V(raw), V(pbs-vm), V(iso),...).
+ type: str
+ returned: success
+ size:
+ description: Size of the listed objects.
+ type: int
+ returned: success
+ subtype:
+ description: Subtype of the listed objects (can be V(qemu) or V(lxc)).
+ type: str
+ returned: When storage is dedicated to backup, typically on PBS storage.
+ verification:
+ description: Backup verification status of the listed objects.
+ type: dict
+ returned: When storage is dedicated to backup, typically on PBS storage.
+ sample: {
+ "state": "ok",
+ "upid": "UPID:backup-srv:00130F49:1A12D8375:00001CD7:657A2258:verificationjob:daily\\x3av\\x2dd0cc18c5\\x2d8707:root@pam:"
+ }
+ volid:
+ description: Volume identifier of the listed objects.
+ type: str
+ returned: success
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ ProxmoxAnsible, proxmox_auth_argument_spec)
+
+
+def proxmox_storage_info_argument_spec():
+ return dict(
+ storage=dict(type="str", required=True, aliases=["name"]),
+ content=dict(type="str", required=False, default="all", choices=["all", "backup", "rootdir", "images", "iso"]),
+ vmid=dict(type="int"),
+ node=dict(required=True, type="str"),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ storage_info_args = proxmox_storage_info_argument_spec()
+ module_args.update(storage_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[("api_password", "api_token_id")],
+ required_together=[("api_token_id", "api_token_secret")],
+ supports_check_mode=True,
+ )
+ result = dict(changed=False)
+ proxmox = ProxmoxAnsible(module)
+ res = proxmox.get_storage_content(
+ node=module.params["node"],
+ storage=module.params["storage"],
+ content=None if module.params["content"] == "all" else module.params["content"],
+ vmid=module.params["vmid"],
+ )
+ result["proxmox_storage_content"] = res
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
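The module maps content=all to no filter before calling GET /nodes/{node}/storage/{storage}/content; a hedged proxmoxer equivalent with placeholder node, storage, and credentials:

    from proxmoxer import ProxmoxAPI

    api = ProxmoxAPI("helldorado", user="root@pam", password="secret", verify_ssl=False)
    for item in api.nodes("pve").storage("lvm2").content.get(content="backup", vmid=130):
        print(item["volid"], item["size"])
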
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
index fd3759364..3c29e59cf 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
@@ -19,12 +19,12 @@ description:
options:
storage:
description:
- - Only return informations on a specific storage.
+ - Only return information on a specific storage.
aliases: ['name']
type: str
type:
description:
- - Filter on a specifc storage type.
+ - Filter on a specific storage type.
type: str
author: Tristan Le Guern (@tleguern)
extends_documentation_fragment:
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
index a2e66b38d..d31a04980 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
@@ -37,7 +37,7 @@ extends_documentation_fragment:
EXAMPLES = '''
- name: List tasks on node01
- community.general.proxmox_task_info:
+ community.general.proxmox_tasks_info:
api_host: proxmoxhost
api_user: root@pam
api_password: '{{ password | default(omit) }}'
@@ -47,7 +47,7 @@ EXAMPLES = '''
register: result
- name: Retrieve information about specific tasks on node01
- community.general.proxmox_task_info:
+ community.general.proxmox_tasks_info:
api_host: proxmoxhost
api_user: root@pam
api_password: '{{ password | default(omit) }}'
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_template.py b/ansible_collections/community/general/plugins/modules/proxmox_template.py
index 2bf24ff84..615bfc182 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_template.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_template.py
@@ -28,18 +28,18 @@ options:
src:
description:
- Path to uploaded file.
- - Required only for I(state=present).
+ - Required only for O(state=present).
type: path
template:
description:
- The template name.
- - Required for I(state=absent) to delete a template.
- - Required for I(state=present) to download an appliance container template (pveam).
+ - Required for O(state=absent) to delete a template.
+ - Required for O(state=present) to download an appliance container template (pveam).
type: str
content_type:
description:
- Content type.
- - Required only for I(state=present).
+ - Required only for O(state=present).
type: str
default: 'vztmpl'
choices: ['vztmpl', 'iso']
@@ -55,7 +55,7 @@ options:
default: 30
force:
description:
- - It can only be used with I(state=present), existing template will be overwritten.
+ - It can only be used with O(state=present), existing template will be overwritten.
type: bool
default: false
state:
@@ -65,7 +65,8 @@ options:
choices: ['present', 'absent']
default: present
notes:
- - Requires C(proxmoxer) and C(requests) modules on host. This modules can be installed with M(ansible.builtin.pip).
+ - Requires C(proxmoxer) and C(requests) modules on host. Those modules can be installed with M(ansible.builtin.pip).
+ - C(proxmoxer) >= 1.2.0 requires C(requests_toolbelt) to upload files larger than 256 MB.
author: Sergei Antipov (@UnderGreen)
extends_documentation_fragment:
- community.general.proxmox.documentation
@@ -123,15 +124,29 @@ EXAMPLES = '''
import os
import time
+import traceback
-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_TOOLBELT_ERR = None
+try:
+ # requests_toolbelt is used internally by proxmoxer module
+ import requests_toolbelt # noqa: F401, pylint: disable=unused-import
+ HAS_REQUESTS_TOOLBELT = True
+except ImportError:
+ HAS_REQUESTS_TOOLBELT = False
+ REQUESTS_TOOLBELT_ERR = traceback.format_exc()
class ProxmoxTemplateAnsible(ProxmoxAnsible):
def get_template(self, node, storage, content_type, template):
- return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get()
- if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
+ try:
+ return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get()
+ if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
+ except Exception as e:
+ self.module.fail_json(msg="Failed to retrieve template '%s:%s/%s': %s" % (storage, content_type, template, e))
def task_status(self, node, taskid, timeout):
"""
@@ -149,12 +164,24 @@ class ProxmoxTemplateAnsible(ProxmoxAnsible):
return False
def upload_template(self, node, storage, content_type, realpath, timeout):
- taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
- return self.task_status(node, taskid, timeout)
+ stats = os.stat(realpath)
+ if (LooseVersion(self.proxmoxer_version) >= LooseVersion('1.2.0') and
+ stats.st_size > 268435456 and not HAS_REQUESTS_TOOLBELT):
+ self.module.fail_json(msg="'requests_toolbelt' module is required to upload files larger than 256MB",
+ exception=missing_required_lib('requests_toolbelt'))
+
+ try:
+ taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
+ return self.task_status(node, taskid, timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Uploading template %s failed with error: %s" % (realpath, e))
def download_template(self, node, storage, template, timeout):
- taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template)
- return self.task_status(node, taskid, timeout)
+ try:
+ taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template)
+ return self.task_status(node, taskid, timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Downloading template %s failed with error: %s" % (template, e))
def delete_template(self, node, storage, content_type, template, timeout):
volid = '%s:%s/%s' % (storage, content_type, template)
@@ -199,35 +226,32 @@ def main():
timeout = module.params['timeout']
if state == 'present':
- try:
- content_type = module.params['content_type']
- src = module.params['src']
+ content_type = module.params['content_type']
+ src = module.params['src']
- # download appliance template
- if content_type == 'vztmpl' and not src:
- template = module.params['template']
+ # download appliance template
+ if content_type == 'vztmpl' and not src:
+ template = module.params['template']
- if not template:
- module.fail_json(msg='template param for downloading appliance template is mandatory')
+ if not template:
+ module.fail_json(msg='template param for downloading appliance template is mandatory')
- if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+ if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
- if proxmox.download_template(node, storage, template, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
+ if proxmox.download_template(node, storage, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
- template = os.path.basename(src)
- if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template))
- elif not src:
- module.fail_json(msg='src param to uploading template file is mandatory')
- elif not (os.path.exists(src) and os.path.isfile(src)):
- module.fail_json(msg='template file on path %s not exists' % src)
-
- if proxmox.upload_template(node, storage, content_type, src, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
- except Exception as e:
- module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
+ template = os.path.basename(src)
+ if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+ elif not src:
+ module.fail_json(msg='src param for uploading template file is mandatory')
+ elif not (os.path.exists(src) and os.path.isfile(src)):
+ module.fail_json(msg='template file on path %s does not exist' % src)
+
+ if proxmox.upload_template(node, storage, content_type, src, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
elif state == 'absent':
try:
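A small sketch of the early size check upload_template() gains above: with proxmoxer >= 1.2.0, uploads over 256 MB need requests_toolbelt, so the module fails before starting the transfer. The threshold is taken from the patch; the helper below is illustrative only:

    import os

    LARGE_UPLOAD_BYTES = 268435456  # 256 MB, the threshold used in the patch

    def large_upload_blocked(path, proxmoxer_is_1_2_plus, has_requests_toolbelt):
        # True when the upload would fail later anyway, so it is better to stop early.
        return (proxmoxer_is_1_2_plus
                and os.stat(path).st_size > LARGE_UPLOAD_BYTES
                and not has_requests_toolbelt)
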
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
index a515f2b45..20154528a 100644
--- a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
+++ b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
@@ -193,14 +193,14 @@ class ProxmoxUser:
self.user[k] = v
elif k in ['groups', 'tokens'] and (v == '' or v is None):
self.user[k] = []
- elif k == 'groups' and type(v) == str:
+ elif k == 'groups' and isinstance(v, str):
self.user['groups'] = v.split(',')
- elif k == 'tokens' and type(v) == list:
+ elif k == 'tokens' and isinstance(v, list):
for token in v:
if 'privsep' in token:
token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
self.user['tokens'] = v
- elif k == 'tokens' and type(v) == dict:
+ elif k == 'tokens' and isinstance(v, dict):
self.user['tokens'] = list()
for tokenid, tokenvalues in v.items():
t = tokenvalues
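The type(v) == str to isinstance(v, str) changes above are behaviour-preserving for plain values but additionally accept subclasses; a tiny illustration unrelated to Proxmox data:

    class Token(str):
        pass

    v = Token("abc")
    print(type(v) == str)      # False: exact-type comparison rejects the subclass
    print(isinstance(v, str))  # True: isinstance accepts it
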
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_vm_info.py b/ansible_collections/community/general/plugins/modules/proxmox_vm_info.py
new file mode 100644
index 000000000..30342b684
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_vm_info.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2023, Sergei Antipov <greendayonfire at gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: proxmox_vm_info
+short_description: Retrieve information about one or more Proxmox VE virtual machines
+version_added: 7.2.0
+description:
+ - Retrieve information about one or more Proxmox VE virtual machines.
+author: 'Sergei Antipov (@UnderGreen) <greendayonfire at gmail dot com>'
+options:
+ node:
+ description:
+ - Restrict results to a specific Proxmox VE node.
+ type: str
+ type:
+ description:
+ - Restrict results to a specific virtual machine type.
+ type: str
+ choices:
+ - all
+ - qemu
+ - lxc
+ default: all
+ vmid:
+ description:
+ - Restrict results to a specific virtual machine by using its ID.
+ - If a VM with the specified vmid does not exist in the cluster, the resulting list will be empty.
+ type: int
+ name:
+ description:
+ - Restrict results to specific virtual machines by using their name.
+ - If no VMs with the specified name exist in the cluster, the resulting list will be empty.
+ type: str
+ config:
+ description:
+ - Whether to retrieve the VM configuration along with VM status.
+ - If set to V(none) (default), no configuration will be returned.
+ - If set to V(current), the current running configuration will be returned.
+ - If set to V(pending), the configuration with pending changes applied will be returned.
+ type: str
+ choices:
+ - none
+ - current
+ - pending
+ default: none
+ version_added: 8.1.0
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+"""
+
+EXAMPLES = """
+- name: List all existing virtual machines on node
+ community.general.proxmox_vm_info:
+ api_host: proxmoxhost
+ api_user: root@pam
+ api_token_id: '{{ token_id | default(omit) }}'
+ api_token_secret: '{{ token_secret | default(omit) }}'
+ node: node01
+
+- name: List all QEMU virtual machines on node
+ community.general.proxmox_vm_info:
+ api_host: proxmoxhost
+ api_user: root@pam
+ api_password: '{{ password | default(omit) }}'
+ node: node01
+ type: qemu
+
+- name: Retrieve information about specific VM by ID
+ community.general.proxmox_vm_info:
+ api_host: proxmoxhost
+ api_user: root@pam
+ api_password: '{{ password | default(omit) }}'
+ node: node01
+ type: qemu
+ vmid: 101
+
+- name: Retrieve information about specific VM by name and get current configuration
+ community.general.proxmox_vm_info:
+ api_host: proxmoxhost
+ api_user: root@pam
+ api_password: '{{ password | default(omit) }}'
+ node: node01
+ type: lxc
+ name: lxc05.home.arpa
+ config: current
+"""
+
+RETURN = """
+proxmox_vms:
+ description: List of virtual machines.
+ returned: on success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "cpu": 0.258944410905281,
+ "cpus": 1,
+ "disk": 0,
+ "diskread": 0,
+ "diskwrite": 0,
+ "id": "qemu/100",
+ "maxcpu": 1,
+ "maxdisk": 34359738368,
+ "maxmem": 4294967296,
+ "mem": 35158379,
+ "name": "pxe.home.arpa",
+ "netin": 99715803,
+ "netout": 14237835,
+ "node": "pve",
+ "pid": 1947197,
+ "status": "running",
+ "template": False,
+ "type": "qemu",
+ "uptime": 135530,
+ "vmid": 100
+ },
+ {
+ "cpu": 0,
+ "cpus": 1,
+ "disk": 0,
+ "diskread": 0,
+ "diskwrite": 0,
+ "id": "qemu/101",
+ "maxcpu": 1,
+ "maxdisk": 0,
+ "maxmem": 536870912,
+ "mem": 0,
+ "name": "test1",
+ "netin": 0,
+ "netout": 0,
+ "node": "pve",
+ "status": "stopped",
+ "template": False,
+ "type": "qemu",
+ "uptime": 0,
+ "vmid": 101
+ }
+ ]
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec,
+ ProxmoxAnsible,
+ proxmox_to_ansible_bool,
+)
+
+
+class ProxmoxVmInfoAnsible(ProxmoxAnsible):
+ def get_vms_from_cluster_resources(self):
+ try:
+ return self.proxmox_api.cluster().resources().get(type="vm")
+ except Exception as e:
+ self.module.fail_json(
+ msg="Failed to retrieve VMs information from cluster resources: %s" % e
+ )
+
+ def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None):
+ # Leave in dict only machines that user wants to know about
+ filtered_vms = {
+ vm: info for vm, info in cluster_machines.items() if not (
+ type != info["type"]
+ or (node and info["node"] != node)
+ or (vmid and int(info["vmid"]) != vmid)
+ or (name is not None and info["name"] != name)
+ )
+ }
+ # Get list of unique node names and loop through it to get info about machines.
+ nodes = frozenset([info["node"] for vm, info in filtered_vms.items()])
+ for this_node in nodes:
+ # "type" is mandatory and can have only values of "qemu" or "lxc". Seems that use of reflection is safe.
+ call_vm_getter = getattr(self.proxmox_api.nodes(this_node), type)
+ vms_from_this_node = call_vm_getter().get()
+ for detected_vm in vms_from_this_node:
+ this_vm_id = int(detected_vm["vmid"])
+ desired_vm = filtered_vms.get(this_vm_id, None)
+ if desired_vm:
+ desired_vm.update(detected_vm)
+ desired_vm["vmid"] = this_vm_id
+ desired_vm["template"] = proxmox_to_ansible_bool(desired_vm["template"])
+ # When user wants to retrieve the VM configuration
+ if config != "none":
+ # pending = 0, current = 1
+ config_type = 0 if config == "pending" else 1
+ # GET /nodes/{node}/qemu/{vmid}/config current=[0/1]
+ desired_vm["config"] = call_vm_getter(this_vm_id).config().get(current=config_type)
+ return filtered_vms
+
+ def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None):
+ try:
+ return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)
+
+ def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None):
+ try:
+ return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, node, config)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ vm_info_args = dict(
+ node=dict(type="str", required=False),
+ type=dict(
+ type="str", choices=["lxc", "qemu", "all"], default="all", required=False
+ ),
+ vmid=dict(type="int", required=False),
+ name=dict(type="str", required=False),
+ config=dict(
+ type="str", choices=["none", "current", "pending"],
+ default="none", required=False
+ ),
+ )
+ module_args.update(vm_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_together=[("api_token_id", "api_token_secret")],
+ required_one_of=[("api_password", "api_token_id")],
+ supports_check_mode=True,
+ )
+
+ proxmox = ProxmoxVmInfoAnsible(module)
+ node = module.params["node"]
+ type = module.params["type"]
+ vmid = module.params["vmid"]
+ name = module.params["name"]
+ config = module.params["config"]
+
+ result = dict(changed=False)
+
+ if node and proxmox.get_node(node) is None:
+ module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)
+
+ vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
+ cluster_machines = {int(machine["vmid"]): machine for machine in vms_cluster_resources}
+ vms = {}
+
+ if type == "lxc":
+ vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config)
+ elif type == "qemu":
+ vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config)
+ else:
+ vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config)
+ vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config))
+
+ result["proxmox_vms"] = [info for vm, info in sorted(vms.items())]
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
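A minimal standalone sketch of the filtering step get_vms_from_nodes() performs on the cluster-resources map, using hand-written stand-in data:

    cluster_machines = {
        100: {"vmid": 100, "type": "qemu", "node": "pve", "name": "pxe.home.arpa"},
        101: {"vmid": 101, "type": "lxc", "node": "pve", "name": "lxc05.home.arpa"},
    }

    def filter_vms(machines, type, vmid=None, name=None, node=None):
        # Keep only entries matching every supplied filter, mirroring the module logic.
        return {
            key: info for key, info in machines.items() if not (
                type != info["type"]
                or (node and info["node"] != node)
                or (vmid and int(info["vmid"]) != vmid)
                or (name is not None and info["name"] != name)
            )
        }

    print(sorted(filter_vms(cluster_machines, "qemu")))           # -> [100]
    print(sorted(filter_vms(cluster_machines, "lxc", vmid=101)))  # -> [101]
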
diff --git a/ansible_collections/community/general/plugins/modules/pubnub_blocks.py b/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
index a03553c5c..34098873a 100644
--- a/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
+++ b/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
@@ -27,7 +27,6 @@ author:
- PubNub <support@pubnub.com> (@pubnub)
- Sergey Mamontov <sergey@pubnub.com> (@parfeon)
requirements:
- - "python >= 2.7"
- "pubnub_blocks_client >= 1.0"
extends_documentation_fragment:
- community.general.attributes
@@ -40,15 +39,15 @@ options:
email:
description:
- Email from account for which new session should be started.
- - "Not required if C(cache) contains result of previous module call (in
+ - "Not required if O(cache) contains result of previous module call (in
same play)."
required: false
type: str
default: ''
password:
description:
- - Password which match to account to which specified C(email) belong.
- - "Not required if C(cache) contains result of previous module call (in
+ - Password which matches the account to which the specified O(email) belongs.
+ - "Not required if O(cache) contains result of previous module call (in
same play)."
required: false
type: str
@@ -63,7 +62,7 @@ options:
default: {}
account:
description:
- - "Name of PubNub account for from which C(application) will be used to
+ - "Name of PubNub account for from which O(application) will be used to
manage blocks."
- "User's account will be used if value not set or empty."
type: str
@@ -71,7 +70,7 @@ options:
application:
description:
- "Name of target PubNub application for which blocks configuration on
- specific C(keyset) will be done."
+ specific O(keyset) will be done."
type: str
required: true
keyset:
@@ -102,7 +101,7 @@ options:
event_handlers:
description:
- "List of event handlers which should be updated for specified block
- C(name)."
+ O(name)."
- "Each entry for new event handler should contain: C(name), C(src),
C(channels), C(event). C(name) used as event handler name which can be
used later to make changes to it."
@@ -110,7 +109,7 @@ options:
- "C(channels) is name of channel from which event handler is waiting
for events."
- "C(event) is type of event which is able to trigger event handler:
- I(js-before-publish), I(js-after-publish), I(js-after-presence)."
+ C(js-before-publish), C(js-after-publish), C(js-after-presence)."
- "Each entry for existing handlers should contain C(name) (so target
handler can be identified). Rest parameters (C(src), C(channels) and
C(event)) can be added if changes required for them."
@@ -127,7 +126,7 @@ options:
description:
- "List of fields which should be changed by block itself (doesn't
affect any event handlers)."
- - "Possible options for change is: C(name)."
+ - "Possible options for change is: O(name)."
required: false
default: {}
type: dict
@@ -136,7 +135,7 @@ options:
- "This key allow to try skip certificates check when performing REST API
calls. Sometimes host may have issues with certificates on it and this
will cause problems to call PubNub REST API."
- - If check should be ignored C(False) should be passed to this parameter.
+ - If the check should be ignored, V(false) should be passed to this parameter.
required: false
default: true
type: bool
@@ -243,7 +242,7 @@ import os
try:
# Import PubNub BLOCKS client.
- from pubnub_blocks_client import User, Account, Owner, Application, Keyset # noqa: F401, pylint: disable=unused-import
+ from pubnub_blocks_client import User
from pubnub_blocks_client import Block, EventHandler
from pubnub_blocks_client import exceptions
HAS_PUBNUB_BLOCKS_CLIENT = True
diff --git a/ansible_collections/community/general/plugins/modules/pulp_repo.py b/ansible_collections/community/general/plugins/modules/pulp_repo.py
index d7333f89e..c581fa318 100644
--- a/ansible_collections/community/general/plugins/modules/pulp_repo.py
+++ b/ansible_collections/community/general/plugins/modules/pulp_repo.py
@@ -67,7 +67,7 @@ options:
aliases: [ importer_ssl_client_cert ]
feed_client_key:
description:
- - Private key to the certificate specified in I(importer_ssl_client_cert),
+ - Private key to the certificate specified in O(feed_client_cert),
assuming it is not included in the certificate file itself. This can be
the file content or the path to the file.
type: str
@@ -105,7 +105,7 @@ options:
type: str
publish_distributor:
description:
- - Distributor to use when state is C(publish). The default is to
+ - Distributor to use when O(state=publish). The default is to
publish all distributors.
type: str
pulp_host:
@@ -119,13 +119,13 @@ options:
type: str
repo_type:
description:
- - Repo plugin type to use (i.e. C(rpm), C(docker)).
+ - Repo plugin type to use (that is, V(rpm), V(docker)).
default: rpm
type: str
repoview:
description:
- Whether to generate repoview files for a published repository. Setting
- this to C(true) automatically activates C(generate_sqlite).
+ this to V(true) automatically activates O(generate_sqlite).
required: false
type: bool
default: false
@@ -141,23 +141,23 @@ options:
default: true
state:
description:
- - The repo state. A state of C(sync) will queue a sync of the repo.
+ - The repo state. A state of V(sync) will queue a sync of the repo.
This is asynchronous but not delayed like a scheduled sync. A state of
- C(publish) will use the repository's distributor to publish the content.
+ V(publish) will use the repository's distributor to publish the content.
default: present
choices: [ "present", "absent", "sync", "publish" ]
type: str
url_password:
description:
- The password for use in HTTP basic authentication to the pulp API.
- If the I(url_username) parameter is not specified, the I(url_password)
+ If the O(url_username) parameter is not specified, the O(url_password)
parameter will not be used.
url_username:
description:
- The username for use in HTTP basic authentication to the pulp API.
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be
+ - If V(false), SSL certificates will not be validated. This should only be
used on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/puppet.py b/ansible_collections/community/general/plugins/modules/puppet.py
index cd580791b..86eac062a 100644
--- a/ansible_collections/community/general/plugins/modules/puppet.py
+++ b/ansible_collections/community/general/plugins/modules/puppet.py
@@ -13,7 +13,7 @@ DOCUMENTATION = r'''
module: puppet
short_description: Runs puppet
description:
- - Runs I(puppet) agent or apply in a reliable manner.
+ - Runs C(puppet) agent or apply in a reliable manner.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -24,7 +24,7 @@ attributes:
options:
timeout:
description:
- - How long to wait for I(puppet) to finish.
+ - How long to wait for C(puppet) to finish.
type: str
default: 30m
puppetmaster:
@@ -42,8 +42,8 @@ options:
noop:
description:
- Override puppet.conf noop mode.
- - When C(true), run Puppet agent with C(--noop) switch set.
- - When C(false), run Puppet agent with C(--no-noop) switch set.
+ - When V(true), run Puppet agent with C(--noop) switch set.
+ - When V(false), run Puppet agent with C(--no-noop) switch set.
- When unset (default), use default or puppet.conf value if defined.
type: bool
facts:
@@ -67,8 +67,8 @@ options:
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used.
- - C(all) will go to both C(console) and C(syslog).
- - C(stdout) will be deprecated and replaced by C(console).
+ - V(all) will go to both C(console) and C(syslog).
+ - V(stdout) will be deprecated and replaced by C(console).
type: str
choices: [ all, stdout, syslog ]
default: stdout
@@ -114,8 +114,6 @@ options:
show_diff:
description:
- Whether to print file changes details
- - Alias C(show-diff) has been deprecated and will be removed in community.general 7.0.0.
- aliases: ['show-diff']
type: bool
default: false
requirements:
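A short sketch of the O(noop) and O(logdest) behaviour documented above; the manifest path is an assumption for illustration, not part of the module docs:

    - name: Run the puppet agent in no-op mode with a longer timeout
      community.general.puppet:
        noop: true
        timeout: 60m

    - name: Apply a manifest and send logs to both console and syslog
      community.general.puppet:
        manifest: /etc/puppetlabs/code/environments/production/manifests/site.pp
        logdest: all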
@@ -198,9 +196,7 @@ def main():
noop=dict(type='bool'),
logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']),
# The following is not related to Ansible's diff; see https://github.com/ansible-collections/community.general/pull/3980#issuecomment-1005666154
- show_diff=dict(
- type='bool', default=False, aliases=['show-diff'],
- deprecated_aliases=[dict(name='show-diff', version='7.0.0', collection_name='community.general')]),
+ show_diff=dict(type='bool', default=False),
facts=dict(type='dict'),
facter_basename=dict(type='str', default='ansible'),
environment=dict(type='str'),
diff --git a/ansible_collections/community/general/plugins/modules/pushbullet.py b/ansible_collections/community/general/plugins/modules/pushbullet.py
index c7e20c373..673f30cc3 100644
--- a/ansible_collections/community/general/plugins/modules/pushbullet.py
+++ b/ansible_collections/community/general/plugins/modules/pushbullet.py
@@ -59,7 +59,7 @@ options:
url:
type: str
description:
- - URL field, used when I(push_type) is C(link).
+ - URL field, used when O(push_type=link).
notes:
- Requires pushbullet.py Python package on the remote host.
diff --git a/ansible_collections/community/general/plugins/modules/python_requirements_info.py b/ansible_collections/community/general/plugins/modules/python_requirements_info.py
index 231114a1d..8e709440d 100644
--- a/ansible_collections/community/general/plugins/modules/python_requirements_info.py
+++ b/ansible_collections/community/general/plugins/modules/python_requirements_info.py
@@ -12,7 +12,6 @@ module: python_requirements_info
short_description: Show python path and assert dependency versions
description:
- Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
- - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -23,8 +22,8 @@ options:
description: >
A list of version-likes or module names to check for installation.
Supported operators: <, >, <=, >=, or ==. The bare module name like
- I(ansible), the module with a specific version like I(boto3==1.6.1), or a
- partial version like I(requests>2) are all valid specifications.
+ V(ansible), the module with a specific version like V(boto3==1.6.1), or a
+ partial version like V(requests>2) are all valid specifications.
default: []
author:
- Will Thames (@willthames)
@@ -92,7 +91,7 @@ python_system_path:
- /usr/local/opt/python@2/site-packages/
- /usr/lib/python/site-packages/
valid:
- description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired) will be null
returned: always
type: dict
sample:
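A brief sketch showing the kinds of specifiers the O(dependencies) list accepts (the package names and versions are arbitrary examples taken from the option description):

    - name: Check that required Python libraries are present in compatible versions
      community.general.python_requirements_info:
        dependencies:
          - ansible
          - boto3==1.6.1
          - requests>2
      register: python_reqs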
diff --git a/ansible_collections/community/general/plugins/modules/rax.py b/ansible_collections/community/general/plugins/modules/rax.py
index 47c0a6d1b..76e429944 100644
--- a/ansible_collections/community/general/plugins/modules/rax.py
+++ b/ansible_collections/community/general/plugins/modules/rax.py
@@ -24,15 +24,15 @@ options:
auto_increment:
description:
- Whether or not to increment a single number with the name of the
- created servers. Only applicable when used with the I(group) attribute
+ created servers. Only applicable when used with the O(group) attribute
or meta key.
type: bool
default: true
boot_from_volume:
description:
- Whether or not to boot the instance from a Cloud Block Storage volume.
- If C(true) and I(image) is specified a new volume will be created at
- boot time. I(boot_volume_size) is required with I(image) to create a
+ If V(true) and O(image) is specified a new volume will be created at
+ boot time. O(boot_volume_size) is required with O(image) to create a
new volume at boot time.
type: bool
default: false
@@ -45,11 +45,11 @@ options:
type: int
description:
- Size of the volume to create in Gigabytes. This is only required with
- I(image) and I(boot_from_volume).
+ O(image) and O(boot_from_volume).
default: 100
boot_volume_terminate:
description:
- - Whether the I(boot_volume) or newly created volume from I(image) will
+ - Whether the O(boot_volume) or newly created volume from O(image) will
be terminated when the server is terminated
type: bool
default: false
@@ -72,16 +72,16 @@ options:
type: str
description:
- Disk partitioning strategy
- - If not specified it will assume the value C(auto).
+ - If not specified it will assume the value V(auto).
choices:
- auto
- manual
exact_count:
description:
- Explicitly ensure an exact count of instances, used with
- state=active/present. If specified as C(true) and I(count) is less than
+ state=active/present. If specified as V(true) and O(count) is less than
the servers matched, servers will be deleted to match the count. If
- the number of matched servers is fewer than specified in I(count)
+ the number of matched servers is fewer than specified in O(count)
additional servers will be added.
type: bool
default: false
@@ -116,7 +116,7 @@ options:
type: str
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name).
- With I(boot_from_volume), a Cloud Block Storage volume will be created
+ With O(boot_from_volume), a Cloud Block Storage volume will be created
with this image
instance_ids:
type: list
@@ -161,7 +161,7 @@ options:
type: str
description:
- Data to be uploaded to the servers config drive. This option implies
- I(config_drive). Can be a file path or a string
+ O(config_drive). Can be a file path or a string
wait:
description:
- wait for the instance to be in state 'running' before returning
@@ -176,11 +176,11 @@ author:
- "Jesse Keating (@omgjlk)"
- "Matt Martz (@sivel)"
notes:
- - I(exact_count) can be "destructive" if the number of running servers in
- the I(group) is larger than that specified in I(count). In such a case, the
- I(state) is effectively set to C(absent) and the extra servers are deleted.
- In the case of deletion, the returned data structure will have C(action)
- set to C(delete), and the oldest servers in the group will be deleted.
+ - O(exact_count) can be "destructive" if the number of running servers in
+ the O(group) is larger than that specified in O(count). In such a case, the
+ O(state) is effectively set to V(absent) and the extra servers are deleted.
+ In the case of deletion, the returned data structure will have RV(ignore:action)
+ set to V(delete), and the oldest servers in the group will be deleted.
extends_documentation_fragment:
- community.general.rackspace.openstack
- community.general.attributes
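A sketch (arbitrary image, flavor, and group names) of the O(exact_count) behaviour described in the notes above; lowering O(count) on a later run would delete the oldest servers in the group:

    - name: Ensure exactly three servers exist in the web group
      community.general.rax:
        name: web-%d
        group: web
        count: 3
        exact_count: true
        image: ubuntu-22.04
        flavor: general1-1
        state: present
        wait: true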
diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs.py b/ansible_collections/community/general/plugins/modules/rax_cbs.py
index c99626904..77e7cebad 100644
--- a/ansible_collections/community/general/plugins/modules/rax_cbs.py
+++ b/ansible_collections/community/general/plugins/modules/rax_cbs.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- - Manipulate Rackspace Cloud Block Storage Volumes
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Manipulate Rackspace Cloud Block Storage Volumes
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
index 8f540fa0f..00b860a90 100644
--- a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
+++ b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_cbs_attachments
short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
description:
- - Manipulate Rackspace Cloud Block Storage Volume Attachments
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Manipulate Rackspace Cloud Block Storage Volume Attachments
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb.py b/ansible_collections/community/general/plugins/modules/rax_cdb.py
index cf0366d3b..9538579fa 100644
--- a/ansible_collections/community/general/plugins/modules/rax_cdb.py
+++ b/ansible_collections/community/general/plugins/modules/rax_cdb.py
@@ -16,8 +16,6 @@ description:
- creates / deletes or resize a Rackspace Cloud Databases instance
and optionally waits for it to be 'running'. The name option needs to be
unique since it's used to identify the instance.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
attributes:
check_mode:
support: none
@@ -49,7 +47,7 @@ options:
type: str
description:
- version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
- - "The available choices are: C(5.1), C(5.6) and C(10)."
+ - "The available choices are: V(5.1), V(5.6) and V(10)."
default: '5.6'
aliases: ['version']
state:
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
index 35b076aad..b0db11814 100644
--- a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
+++ b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
@@ -13,8 +13,6 @@ module: rax_cdb_database
short_description: Create / delete a database in the Cloud Databases
description:
- create / delete a database in the Cloud Databases.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
index a2cd675d9..6ee86c4fe 100644
--- a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
+++ b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
@@ -14,8 +14,6 @@ module: rax_cdb_user
short_description: Create / delete a Rackspace Cloud Database
description:
- create / delete a database in the Cloud Databases.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb.py b/ansible_collections/community/general/plugins/modules/rax_clb.py
index 9a4ca4f89..23c795f39 100644
--- a/ansible_collections/community/general/plugins/modules/rax_clb.py
+++ b/ansible_collections/community/general/plugins/modules/rax_clb.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_clb
short_description: Create / delete a load balancer in Rackspace Public Cloud
description:
- - creates / deletes a Rackspace Public Cloud load balancer.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - creates / deletes a Rackspace Public Cloud load balancer.
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
index 219f0c2ba..c076dced7 100644
--- a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
+++ b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_clb_nodes
short_description: Add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
- - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
index 5dca9d3ec..b794130cf 100644
--- a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
+++ b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
@@ -12,9 +12,7 @@ DOCUMENTATION = '''
module: rax_clb_ssl
short_description: Manage SSL termination for a Rackspace Cloud Load Balancer
description:
- - Set up, reconfigure, or remove SSL termination for an existing load balancer.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_dns.py b/ansible_collections/community/general/plugins/modules/rax_dns.py
index e70b76914..31782cd88 100644
--- a/ansible_collections/community/general/plugins/modules/rax_dns.py
+++ b/ansible_collections/community/general/plugins/modules/rax_dns.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_dns
short_description: Manage domains on Rackspace Cloud DNS
description:
- - Manage domains on Rackspace Cloud DNS.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Manage domains on Rackspace Cloud DNS
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_dns_record.py b/ansible_collections/community/general/plugins/modules/rax_dns_record.py
index fd3ad47ce..cb3cd279e 100644
--- a/ansible_collections/community/general/plugins/modules/rax_dns_record.py
+++ b/ansible_collections/community/general/plugins/modules/rax_dns_record.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- - Manage DNS records on Rackspace Cloud DNS.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Manage DNS records on Rackspace Cloud DNS
attributes:
check_mode:
support: none
@@ -92,11 +90,9 @@ options:
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
- the Rackspace CloudDNS API"
+ the Rackspace CloudDNS API."
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
- supplied
- - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- - C(PTR) record support was added in version 1.7
+ supplied.
author: "Matt Martz (@sivel)"
extends_documentation_fragment:
- community.general.rackspace
diff --git a/ansible_collections/community/general/plugins/modules/rax_facts.py b/ansible_collections/community/general/plugins/modules/rax_facts.py
index 9e63fec38..f8bb0e050 100644
--- a/ansible_collections/community/general/plugins/modules/rax_facts.py
+++ b/ansible_collections/community/general/plugins/modules/rax_facts.py
@@ -14,8 +14,6 @@ module: rax_facts
short_description: Gather facts for Rackspace Cloud Servers
description:
- Gather facts for Rackspace Cloud Servers.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
attributes:
check_mode:
version_added: 3.3.0
diff --git a/ansible_collections/community/general/plugins/modules/rax_files.py b/ansible_collections/community/general/plugins/modules/rax_files.py
index 2d52ebc0f..a63e107eb 100644
--- a/ansible_collections/community/general/plugins/modules/rax_files.py
+++ b/ansible_collections/community/general/plugins/modules/rax_files.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_files
short_description: Manipulate Rackspace Cloud Files Containers
description:
- - Manipulate Rackspace Cloud Files Containers.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Manipulate Rackspace Cloud Files Containers
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_files_objects.py b/ansible_collections/community/general/plugins/modules/rax_files_objects.py
index 08a5cd4e2..bbcdfe4f8 100644
--- a/ansible_collections/community/general/plugins/modules/rax_files_objects.py
+++ b/ansible_collections/community/general/plugins/modules/rax_files_objects.py
@@ -14,8 +14,6 @@ module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
attributes:
check_mode:
support: none
@@ -25,7 +23,7 @@ options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
- Selecting this option is only appropriate when setting I(type=meta).
+ Selecting this option is only appropriate when setting O(type=meta).
type: bool
default: false
container:
@@ -38,7 +36,7 @@ options:
description:
- The destination of a C(get) operation; i.e. a local directory, C(/home/user/myfolder).
Used to specify the destination of an operation on a remote object; i.e. a file name,
- C(file1), or a comma-separated list of remote objects, C(file1,file2,file17).
+ V(file1), or a comma-separated list of remote objects, V(file1,file2,file17).
expires:
type: int
description:
@@ -52,8 +50,8 @@ options:
type: str
description:
- >
- The method of operation to be performed: C(put) to upload files, C(get) to download files or
- C(delete) to remove remote objects in Cloud Files.
+ The method of operation to be performed: V(put) to upload files, V(get) to download files or
+ V(delete) to remove remote objects in Cloud Files.
choices:
- get
- put
@@ -63,8 +61,8 @@ options:
type: str
description:
- Source from which to upload files. Used to specify a remote object as a source for
- an operation, i.e. a file name, C(file1), or a comma-separated list of remote objects,
- C(file1,file2,file17). Parameters I(src) and I(dest) are mutually exclusive on remote-only object operations
+ an operation, i.e. a file name, V(file1), or a comma-separated list of remote objects,
+ V(file1,file2,file17). Parameters O(src) and O(dest) are mutually exclusive on remote-only object operations.
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
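A sketch (hypothetical container, paths, and object names) of the V(put) and V(get) methods described above:

    - name: Upload a local directory to a container
      community.general.rax_files_objects:
        container: testcont
        src: ~/Downloads/testcont
        method: put

    - name: Download two remote objects into a local directory
      community.general.rax_files_objects:
        container: testcont
        src: file1,file2
        dest: /home/user/myfolder
        method: get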
diff --git a/ansible_collections/community/general/plugins/modules/rax_identity.py b/ansible_collections/community/general/plugins/modules/rax_identity.py
index 19f803953..b2eb15627 100644
--- a/ansible_collections/community/general/plugins/modules/rax_identity.py
+++ b/ansible_collections/community/general/plugins/modules/rax_identity.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_identity
short_description: Load Rackspace Cloud Identity
description:
- - Verifies Rackspace Cloud credentials and returns identity information.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Verifies Rackspace Cloud credentials and returns identity information
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_keypair.py b/ansible_collections/community/general/plugins/modules/rax_keypair.py
index 22750f03c..d7d7a2cc3 100644
--- a/ansible_collections/community/general/plugins/modules/rax_keypair.py
+++ b/ansible_collections/community/general/plugins/modules/rax_keypair.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_keypair
short_description: Create a keypair for use with Rackspace Cloud Servers
description:
- - Create a keypair for use with Rackspace Cloud Servers.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Create a keypair for use with Rackspace Cloud Servers
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_meta.py b/ansible_collections/community/general/plugins/modules/rax_meta.py
index 751300858..7b52e906f 100644
--- a/ansible_collections/community/general/plugins/modules/rax_meta.py
+++ b/ansible_collections/community/general/plugins/modules/rax_meta.py
@@ -13,9 +13,7 @@ DOCUMENTATION = '''
module: rax_meta
short_description: Manipulate metadata for Rackspace Cloud Servers
description:
- - Manipulate metadata for Rackspace Cloud Servers.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+ - Manipulate metadata for Rackspace Cloud Servers
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
index f6e650ec0..b66611a90 100644
--- a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
@@ -13,14 +13,12 @@ DOCUMENTATION = '''
module: rax_mon_alarm
short_description: Create or delete a Rackspace Cloud Monitoring alarm
description:
- - Create or delete a Rackspace Cloud Monitoring alarm that associates an
- existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
- criteria that specify what conditions will trigger which levels of
- notifications. Rackspace monitoring module flow | rax_mon_entity ->
- rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
- *rax_mon_alarm*.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
attributes:
check_mode:
support: none
@@ -30,7 +28,7 @@ options:
state:
type: str
description:
- - Ensure that the alarm with this C(label) exists or does not exist.
+ - Ensure that the alarm with this O(label) exists or does not exist.
choices: [ "present", "absent" ]
required: false
default: present
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_check.py b/ansible_collections/community/general/plugins/modules/rax_mon_check.py
index 6a0ad03a3..253c26dcf 100644
--- a/ansible_collections/community/general/plugins/modules/rax_mon_check.py
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_check.py
@@ -14,14 +14,12 @@ module: rax_mon_check
short_description: Create or delete a Rackspace Cloud Monitoring check for an
existing entity.
description:
- - Create or delete a Rackspace Cloud Monitoring check associated with an
- existing rax_mon_entity. A check is a specific test or measurement that is
- performed, possibly from different monitoring zones, on the systems you
- monitor. Rackspace monitoring module flow | rax_mon_entity ->
- *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
- rax_mon_alarm
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
attributes:
check_mode:
support: none
@@ -31,7 +29,7 @@ options:
state:
type: str
description:
- - Ensure that a check with this C(label) exists or does not exist.
+ - Ensure that a check with this O(label) exists or does not exist.
choices: ["present", "absent"]
default: present
entity_id:
@@ -52,27 +50,27 @@ options:
that have a non-null C(agent_id).
- |
Choices for this option are:
- - C(remote.dns)
- - C(remote.ftp-banner)
- - C(remote.http)
- - C(remote.imap-banner)
- - C(remote.mssql-banner)
- - C(remote.mysql-banner)
- - C(remote.ping)
- - C(remote.pop3-banner)
- - C(remote.postgresql-banner)
- - C(remote.smtp-banner)
- - C(remote.smtp)
- - C(remote.ssh)
- - C(remote.tcp)
- - C(remote.telnet-banner)
- - C(agent.filesystem)
- - C(agent.memory)
- - C(agent.load_average)
- - C(agent.cpu)
- - C(agent.disk)
- - C(agent.network)
- - C(agent.plugin)
+ - V(remote.dns)
+ - V(remote.ftp-banner)
+ - V(remote.http)
+ - V(remote.imap-banner)
+ - V(remote.mssql-banner)
+ - V(remote.mysql-banner)
+ - V(remote.ping)
+ - V(remote.pop3-banner)
+ - V(remote.postgresql-banner)
+ - V(remote.smtp-banner)
+ - V(remote.smtp)
+ - V(remote.ssh)
+ - V(remote.tcp)
+ - V(remote.telnet-banner)
+ - V(agent.filesystem)
+ - V(agent.memory)
+ - V(agent.load_average)
+ - V(agent.cpu)
+ - V(agent.disk)
+ - V(agent.network)
+ - V(agent.plugin)
required: true
monitoring_zones_poll:
type: str
@@ -83,15 +81,15 @@ options:
target_hostname:
type: str
description:
- - One of I(target_hostname) and I(target_alias) is required for remote.* checks,
+ - One of O(target_hostname) and O(target_alias) is required for remote.* checks,
but prohibited for agent.* checks. The hostname this check should target.
Must be a valid IPv4, IPv6, or FQDN.
target_alias:
type: str
description:
- - One of I(target_alias) and I(target_hostname) is required for remote.* checks,
+ - One of O(target_alias) and O(target_hostname) is required for remote.* checks,
but prohibited for agent.* checks. Use the corresponding key in the entity's
- I(ip_addresses) hash to resolve an IP address to target.
+ C(ip_addresses) hash to resolve an IP address to target.
details:
type: dict
default: {}
@@ -101,7 +99,7 @@ options:
256 items.
disabled:
description:
- - If C(true), ensure the check is created, but don't actually use it yet.
+ - If V(true), ensure the check is created, but don't actually use it yet.
type: bool
default: false
metadata:
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
index b42bd173b..fbad9f98f 100644
--- a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
@@ -13,13 +13,11 @@ DOCUMENTATION = '''
module: rax_mon_entity
short_description: Create or delete a Rackspace Cloud Monitoring entity
description:
- - Create or delete a Rackspace Cloud Monitoring entity, which represents a device
- to monitor. Entities associate checks and alarms with a target system and
- provide a convenient, centralized place to store IP addresses. Rackspace
- monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
- rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
attributes:
check_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
index 91d079359..7539f2a37 100644
--- a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
@@ -26,7 +26,7 @@ options:
state:
type: str
description:
- - Ensure that the notification with this C(label) exists or does not exist.
+ - Ensure that the notification with this O(label) exists or does not exist.
choices: ['present', 'absent']
default: present
label:
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
index ac8b189aa..31647304b 100644
--- a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
@@ -14,12 +14,10 @@ module: rax_mon_notification_plan
short_description: Create or delete a Rackspace Cloud Monitoring notification
plan.
description:
- - Create or delete a Rackspace Cloud Monitoring notification plan by
- associating existing rax_mon_notifications with severity levels. Rackspace
- monitoring module flow | rax_mon_entity -> rax_mon_check ->
- rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm.
- - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
- - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
attributes:
check_mode:
support: none
@@ -29,7 +27,7 @@ options:
state:
type: str
description:
- - Ensure that the notification plan with this C(label) exists or does not
+ - Ensure that the notification plan with this O(label) exists or does not
exist.
choices: ['present', 'absent']
default: present
diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
index 677a75b33..f4bb79025 100644
--- a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
+++ b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
@@ -36,7 +36,7 @@ options:
type: str
description:
- Disk partitioning strategy
- - If not specified, it will fallback to C(auto).
+ - If not specified, it will fallback to V(auto).
choices:
- auto
- manual
@@ -53,7 +53,7 @@ options:
image:
type: str
description:
- - image to use for the instance. Can be an C(id), C(human_id) or C(name)
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name).
required: true
key_name:
type: str
@@ -113,7 +113,7 @@ options:
type: str
description:
- Data to be uploaded to the servers config drive. This option implies
- I(config_drive). Can be a file path or a string
+ O(config_drive). Can be a file path or a string
wait:
description:
- wait for the scaling group to finish provisioning the minimum amount of
diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
index 60b48bb2a..2869a6910 100644
--- a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
+++ b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
@@ -25,18 +25,18 @@ options:
description:
- The UTC time when this policy will be executed. The time must be
formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
- C(2013-05-19T08:07:08Z)
+ V(2013-05-19T08:07:08Z)
change:
type: int
description:
- The change, either as a number of servers or as a percentage, to make
in the scaling group. If this is a percentage, you must set
- I(is_percent) to C(true) also.
+ O(is_percent) to V(true) also.
cron:
type: str
description:
- The time when the policy will be executed, as a cron entry. For
- example, if this is parameter is set to C(1 0 * * *)
+ example, if this parameter is set to V(1 0 * * *).
cooldown:
type: int
description:
@@ -51,7 +51,7 @@ options:
many servers should be in the scaling group.
is_percent:
description:
- - Whether the value in I(change) is a percent value
+ - Whether the value in O(change) is a percent value
default: false
type: bool
name:
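A sketch (hypothetical scaling group and values) combining the O(cron), O(change), and O(is_percent) options documented above:

    - name: Scale the group up by 25 percent every day at 00:01 UTC
      community.general.rax_scaling_policy:
        name: nightly-scale-up
        scaling_group: my_asg
        policy_type: schedule
        cron: "1 0 * * *"
        change: 25
        is_percent: true
        state: present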
diff --git a/ansible_collections/community/general/plugins/modules/read_csv.py b/ansible_collections/community/general/plugins/modules/read_csv.py
index f2a359fa7..3c5901318 100644
--- a/ansible_collections/community/general/plugins/modules/read_csv.py
+++ b/ansible_collections/community/general/plugins/modules/read_csv.py
@@ -33,13 +33,13 @@ options:
key:
description:
- The column name used as a key for the resulting dictionary.
- - If C(key) is unset, the module returns a list of dictionaries,
+ - If O(key) is unset, the module returns a list of dictionaries,
where each dictionary is a row in the CSV file.
type: str
dialect:
description:
- The CSV dialect to use when parsing the CSV file.
- - Possible values include C(excel), C(excel-tab) or C(unix).
+ - Possible values include V(excel), V(excel-tab) or V(unix).
type: str
default: excel
fieldnames:
@@ -50,29 +50,31 @@ options:
elements: str
unique:
description:
- - Whether the C(key) used is expected to be unique.
+ - Whether the O(key) used is expected to be unique.
type: bool
default: true
delimiter:
description:
- A one-character string used to separate fields.
- - When using this parameter, you change the default value used by I(dialect).
+ - When using this parameter, you change the default value used by O(dialect).
- The default value depends on the dialect used.
type: str
skipinitialspace:
description:
- Whether to ignore any whitespaces immediately following the delimiter.
- - When using this parameter, you change the default value used by I(dialect).
+ - When using this parameter, you change the default value used by O(dialect).
- The default value depends on the dialect used.
type: bool
strict:
description:
- Whether to raise an exception on bad CSV input.
- - When using this parameter, you change the default value used by I(dialect).
+ - When using this parameter, you change the default value used by O(dialect).
- The default value depends on the dialect used.
type: bool
-notes:
-- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja.
+seealso:
+ - plugin: ansible.builtin.csvfile
+ plugin_type: lookup
+ description: Can be used to do selective lookups in CSV files from Jinja.
'''
EXAMPLES = r'''
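A sketch (assumed file and column names) of reading a CSV keyed by a unique column, per the O(key), O(unique), and O(delimiter) options above:

    - name: Read users from users.csv and key the result by the name column
      community.general.read_csv:
        path: users.csv
        key: name
        delimiter: ';'
      register: users

    - name: Show one field of a keyed row (assumes a row named gertrude exists)
      ansible.builtin.debug:
        msg: "gertrude's uid is {{ users.dict.gertrude.uid }}"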
diff --git a/ansible_collections/community/general/plugins/modules/redfish_command.py b/ansible_collections/community/general/plugins/modules/redfish_command.py
index 400677eab..e66380493 100644
--- a/ansible_collections/community/general/plugins/modules/redfish_command.py
+++ b/ansible_collections/community/general/plugins/modules/redfish_command.py
@@ -85,6 +85,22 @@ options:
description:
- Role of account to add/modify.
type: str
+ account_types:
+ required: false
+ aliases: [ account_accounttypes ]
+ description:
+ - Array of account types to apply to a user account.
+ type: list
+ elements: str
+ version_added: '7.2.0'
+ oem_account_types:
+ required: false
+ aliases: [ account_oemaccounttypes ]
+ description:
+ - Array of OEM account types to apply to a user account.
+ type: list
+ elements: str
+ version_added: '7.2.0'
bootdevice:
required: false
description:
@@ -93,7 +109,8 @@ options:
timeout:
description:
- Timeout in seconds for HTTP requests to OOB controller.
- default: 10
+ - The default value for this parameter is C(10) but that is being deprecated
+ and will be replaced with C(60) in community.general 9.0.0.
type: int
boot_override_mode:
description:
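Because the implicit 10-second default is being deprecated in favour of 60, playbooks can set O(timeout) explicitly to avoid the warning; a sketch with placeholder credentials:

    - name: Power on a system with an explicit HTTP timeout
      community.general.redfish_command:
        category: Systems
        command: PowerOn
        baseuri: "{{ baseuri }}"
        username: "{{ username }}"
        password: "{{ password }}"
        timeout: 60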
@@ -137,6 +154,12 @@ options:
- URI of the image for the update.
type: str
version_added: '0.2.0'
+ update_image_file:
+ required: false
+ description:
+ - Filename, with optional path, of the image for the update.
+ type: path
+ version_added: '7.1.0'
update_protocol:
required: false
description:
@@ -180,6 +203,12 @@ options:
- InMaintenanceWindowOnReset
- OnStartUpdateRequest
version_added: '6.1.0'
+ update_oem_params:
+ required: false
+ description:
+ - Properties for HTTP Multipart Push Updates.
+ type: dict
+ version_added: '7.5.0'
update_handle:
required: false
description:
@@ -374,6 +403,20 @@ EXAMPLES = '''
new_password: "{{ new_password }}"
roleid: "{{ roleid }}"
+ - name: Add user with specified account types
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+ account_types:
+ - Redfish
+ - WebUI
+
- name: Add user using new option aliases
community.general.redfish_command:
category: Accounts
@@ -541,6 +584,32 @@ EXAMPLES = '''
username: operator
password: supersecretpwd
+ - name: Multipart HTTP push update; timeout is 600 seconds to allow for a
+ large image transfer
+ community.general.redfish_command:
+ category: Update
+ command: MultipartHTTPPushUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 600
+ update_image_file: ~/images/myupdate.img
+
+ - name: Multipart HTTP push with additional options; timeout is 600 seconds
+ to allow for a large image transfer
+ community.general.redfish_command:
+ category: Update
+ command: MultipartHTTPPushUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 600
+ update_image_file: ~/images/myupdate.img
+ update_targets:
+ - /redfish/v1/UpdateService/FirmwareInventory/BMC
+ update_oem_params:
+ PreserveConfiguration: false
+
- name: Perform requested operations to continue the update
community.general.redfish_command:
category: Update
@@ -687,7 +756,7 @@ from ansible.module_utils.common.text.converters import to_native
# More will be added as module features are expanded
CATEGORY_COMMANDS_ALL = {
"Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart",
- "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride",
+ "PowerGracefulShutdown", "PowerReboot", "PowerCycle", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride",
"IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", "VirtualMediaEject", "VerifyBiosAttributes"],
"Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"],
"Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser",
@@ -697,7 +766,7 @@ CATEGORY_COMMANDS_ALL = {
"Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert",
"VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart",
"PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"],
- "Update": ["SimpleUpdate", "PerformRequestedOperations"],
+ "Update": ["SimpleUpdate", "MultipartHTTPPushUpdate", "PerformRequestedOperations"],
}
@@ -717,17 +786,21 @@ def main():
new_username=dict(aliases=["account_username"]),
new_password=dict(aliases=["account_password"], no_log=True),
roleid=dict(aliases=["account_roleid"]),
+ account_types=dict(type='list', elements='str', aliases=["account_accounttypes"]),
+ oem_account_types=dict(type='list', elements='str', aliases=["account_oemaccounttypes"]),
update_username=dict(type='str', aliases=["account_updatename"]),
account_properties=dict(type='dict', default={}),
bootdevice=dict(),
- timeout=dict(type='int', default=10),
+ timeout=dict(type='int'),
uefi_target=dict(),
boot_next=dict(),
boot_override_mode=dict(choices=['Legacy', 'UEFI']),
resource_id=dict(),
update_image_uri=dict(),
+ update_image_file=dict(type='path'),
update_protocol=dict(),
update_targets=dict(type='list', elements='str', default=[]),
+ update_oem_params=dict(type='dict'),
update_creds=dict(
type='dict',
options=dict(
@@ -766,6 +839,16 @@ def main():
supports_check_mode=False
)
+ if module.params['timeout'] is None:
+ timeout = 10
+ module.deprecate(
+ 'The default value {0} for parameter timeout is being deprecated and will be replaced by {1}'.format(
+ 10, 60
+ ),
+ version='9.0.0',
+ collection_name='community.general'
+ )
+
category = module.params['category']
command_list = module.params['command']
@@ -775,12 +858,16 @@ def main():
'token': module.params['auth_token']}
# user to add/modify/delete
- user = {'account_id': module.params['id'],
- 'account_username': module.params['new_username'],
- 'account_password': module.params['new_password'],
- 'account_roleid': module.params['roleid'],
- 'account_updatename': module.params['update_username'],
- 'account_properties': module.params['account_properties']}
+ user = {
+ 'account_id': module.params['id'],
+ 'account_username': module.params['new_username'],
+ 'account_password': module.params['new_password'],
+ 'account_roleid': module.params['roleid'],
+ 'account_accounttypes': module.params['account_types'],
+ 'account_oemaccounttypes': module.params['oem_account_types'],
+ 'account_updatename': module.params['update_username'],
+ 'account_properties': module.params['account_properties'],
+ }
# timeout
timeout = module.params['timeout']
@@ -791,10 +878,12 @@ def main():
# update options
update_opts = {
'update_image_uri': module.params['update_image_uri'],
+ 'update_image_file': module.params['update_image_file'],
'update_protocol': module.params['update_protocol'],
'update_targets': module.params['update_targets'],
'update_creds': module.params['update_creds'],
'update_apply_time': module.params['update_apply_time'],
+ 'update_oem_params': module.params['update_oem_params'],
'update_handle': module.params['update_handle'],
}
@@ -940,6 +1029,10 @@ def main():
result = rf_utils.simple_update(update_opts)
if 'update_status' in result:
return_values['update_status'] = result['update_status']
+ elif command == "MultipartHTTPPushUpdate":
+ result = rf_utils.multipath_http_push_update(update_opts)
+ if 'update_status' in result:
+ return_values['update_status'] = result['update_status']
elif command == "PerformRequestedOperations":
result = rf_utils.perform_requested_update_operations(update_opts['update_handle'])
diff --git a/ansible_collections/community/general/plugins/modules/redfish_config.py b/ansible_collections/community/general/plugins/modules/redfish_config.py
index 9f31870e3..1fea9e7cd 100644
--- a/ansible_collections/community/general/plugins/modules/redfish_config.py
+++ b/ansible_collections/community/general/plugins/modules/redfish_config.py
@@ -64,7 +64,8 @@ options:
timeout:
description:
- Timeout in seconds for HTTP requests to OOB controller.
- default: 10
+ - The default value for this parameter is C(10) but that is being deprecated
+ and will be replaced with C(60) in community.general 9.0.0.
type: int
boot_order:
required: false
@@ -87,6 +88,12 @@ options:
- ID of the System, Manager or Chassis to modify.
type: str
version_added: '0.2.0'
+ service_id:
+ required: false
+ description:
+ - ID of the manager to update.
+ type: str
+ version_added: '8.4.0'
nic_addr:
required: false
description:
@@ -130,7 +137,35 @@ options:
type: dict
default: {}
version_added: '5.7.0'
-
+ storage_subsystem_id:
+ required: false
+ description:
+ - ID of the storage subsystem on which the volume is to be created.
+ type: str
+ default: ''
+ version_added: '7.3.0'
+ volume_ids:
+ required: false
+ description:
+ - List of IDs of volumes to be deleted.
+ type: list
+ default: []
+ elements: str
+ version_added: '7.3.0'
+ secure_boot_enable:
+ required: false
+ description:
+ - Whether to enable or disable SecureBoot.
+ type: bool
+ default: true
+ version_added: '7.5.0'
+ volume_details:
+ required: false
+ description:
+ - Dictionary of settings describing the volume to be created.
+ type: dict
+ default: {}
+ version_added: '7.5.0'
author:
- "Jose Delarosa (@jose-delarosa)"
- "T S Kushal (@TSKushal)"
@@ -272,6 +307,48 @@ EXAMPLES = '''
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
+
+ - name: Set SecureBoot
+ community.general.redfish_config:
+ category: Systems
+ command: SetSecureBoot
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ secure_boot_enable: True
+
+ - name: Delete All Volumes
+ community.general.redfish_config:
+ category: Systems
+ command: DeleteVolumes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ storage_subsystem_id: "DExxxxxx"
+ volume_ids: ["volume1", "volume2"]
+
+ - name: Create Volume
+ community.general.redfish_config:
+ category: Systems
+ command: CreateVolume
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ storage_subsystem_id: "DExxxxxx"
+ volume_details:
+ Name: "MR Volume"
+ RAIDType: "RAID0"
+ Drives:
+ - "/redfish/v1/Systems/1/Storage/DE00B000/Drives/1"
+
+ - name: Set service identification to {{ service_id }}
+ community.general.redfish_config:
+ category: Manager
+ command: SetServiceIdentification
+ service_id: "{{ service_id }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
'''
RETURN = '''
@@ -290,8 +367,8 @@ from ansible.module_utils.common.text.converters import to_native
# More will be added as module features are expanded
CATEGORY_COMMANDS_ALL = {
"Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
- "SetDefaultBootOrder", "EnableSecureBoot"],
- "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"],
+ "SetDefaultBootOrder", "EnableSecureBoot", "SetSecureBoot", "DeleteVolumes", "CreateVolume"],
+ "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface", "SetServiceIdentification"],
"Sessions": ["SetSessionService"],
}
@@ -307,13 +384,14 @@ def main():
password=dict(no_log=True),
auth_token=dict(no_log=True),
bios_attributes=dict(type='dict', default={}),
- timeout=dict(type='int', default=10),
+ timeout=dict(type='int'),
boot_order=dict(type='list', elements='str', default=[]),
network_protocols=dict(
type='dict',
default={}
),
resource_id=dict(),
+ service_id=dict(),
nic_addr=dict(default='null'),
nic_config=dict(
type='dict',
@@ -323,6 +401,10 @@ def main():
hostinterface_config=dict(type='dict', default={}),
hostinterface_id=dict(),
sessions_config=dict(type='dict', default={}),
+ storage_subsystem_id=dict(type='str', default=''),
+ volume_ids=dict(type='list', default=[], elements='str'),
+ secure_boot_enable=dict(type='bool', default=True),
+ volume_details=dict(type='dict', default={})
),
required_together=[
('username', 'password'),
@@ -336,6 +418,16 @@ def main():
supports_check_mode=False
)
+ if module.params['timeout'] is None:
+ timeout = 10
+ module.deprecate(
+ 'The default value {0} for parameter timeout is being deprecated and will be replaced by {1}'.format(
+ 10, 60
+ ),
+ version='9.0.0',
+ collection_name='community.general'
+ )
+
category = module.params['category']
command_list = module.params['command']
@@ -369,9 +461,23 @@ def main():
# HostInterface instance ID
hostinterface_id = module.params['hostinterface_id']
+ # Service Identification
+ service_id = module.params['service_id']
+
# Sessions config options
sessions_config = module.params['sessions_config']
+ # Volume deletion options
+ storage_subsystem_id = module.params['storage_subsystem_id']
+ volume_ids = module.params['volume_ids']
+
+ # Set SecureBoot options
+ secure_boot_enable = module.params['secure_boot_enable']
+
+ # Volume creation options
+ volume_details = module.params['volume_details']
+ storage_subsystem_id = module.params['storage_subsystem_id']
+
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = RedfishUtils(creds, root_uri, timeout, module,
@@ -405,6 +511,12 @@ def main():
result = rf_utils.set_default_boot_order()
elif command == "EnableSecureBoot":
result = rf_utils.enable_secure_boot()
+ elif command == "SetSecureBoot":
+ result = rf_utils.set_secure_boot(secure_boot_enable)
+ elif command == "DeleteVolumes":
+ result = rf_utils.delete_volumes(storage_subsystem_id, volume_ids)
+ elif command == "CreateVolume":
+ result = rf_utils.create_volume(volume_details, storage_subsystem_id)
elif category == "Manager":
# execute only if we find a Manager service resource
@@ -419,6 +531,8 @@ def main():
result = rf_utils.set_manager_nic(nic_addr, nic_config)
elif command == "SetHostInterface":
result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id)
+ elif command == "SetServiceIdentification":
+ result = rf_utils.set_service_identification(service_id)
elif category == "Sessions":
# execute only if we find a Sessions resource
diff --git a/ansible_collections/community/general/plugins/modules/redfish_info.py b/ansible_collections/community/general/plugins/modules/redfish_info.py
index 364df40b5..0b39bb6fa 100644
--- a/ansible_collections/community/general/plugins/modules/redfish_info.py
+++ b/ansible_collections/community/general/plugins/modules/redfish_info.py
@@ -16,8 +16,6 @@ description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
get information back.
- Information retrieved is placed in a location specified by the user.
- - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
@@ -57,10 +55,16 @@ options:
- Security token for authenticating to OOB controller.
type: str
version_added: 2.3.0
+ manager:
+ description:
+ - Name of manager on OOB controller to target.
+ type: str
+ version_added: '8.3.0'
timeout:
description:
- Timeout in seconds for HTTP requests to OOB controller.
- default: 10
+ - The default value for this parameter is C(10) but that is being deprecated
+ and will be replaced with C(60) in community.general 9.0.0.
type: int
update_handle:
required: false
@@ -249,6 +253,15 @@ EXAMPLES = '''
username: "{{ username }}"
password: "{{ password }}"
+ - name: Get service identification
+ community.general.redfish_info:
+ category: Manager
+ command: GetServiceIdentification
+ manager: "{{ manager }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
- name: Get software inventory
community.general.redfish_info:
category: Update
@@ -337,6 +350,14 @@ EXAMPLES = '''
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
+
+ - name: Get BIOS registry
+ community.general.redfish_info:
+ category: Systems
+ command: GetBiosRegistries
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
'''
RETURN = '''
@@ -354,7 +375,7 @@ CATEGORY_COMMANDS_ALL = {
"Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
"GetMemoryInventory", "GetNicInventory", "GetHealthReport",
"GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
- "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia"],
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia", "GetBiosRegistries"],
"Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
"GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"],
"Accounts": ["ListUsers"],
@@ -362,7 +383,7 @@ CATEGORY_COMMANDS_ALL = {
"Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory",
"GetUpdateStatus"],
"Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
- "GetHealthReport", "GetHostInterfaces", "GetManagerInventory"],
+ "GetHealthReport", "GetHostInterfaces", "GetManagerInventory", "GetServiceIdentification"],
}
CATEGORY_COMMANDS_DEFAULT = {
@@ -386,8 +407,9 @@ def main():
username=dict(),
password=dict(no_log=True),
auth_token=dict(no_log=True),
- timeout=dict(type='int', default=10),
+ timeout=dict(type='int'),
update_handle=dict(),
+ manager=dict(),
),
required_together=[
('username', 'password'),
@@ -401,6 +423,16 @@ def main():
supports_check_mode=True,
)
+ if module.params['timeout'] is None:
+ timeout = 10
+ module.deprecate(
+ 'The default value {0} for parameter timeout is being deprecated and will be replaced by {1}'.format(
+ 10, 60
+ ),
+ version='9.0.0',
+ collection_name='community.general'
+ )
+
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password'],
@@ -412,6 +444,9 @@ def main():
# update handle
update_handle = module.params['update_handle']
+ # manager
+ manager = module.params['manager']
+
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = RedfishUtils(creds, root_uri, timeout, module)
@@ -478,6 +513,8 @@ def main():
result["health_report"] = rf_utils.get_multi_system_health_report()
elif command == "GetVirtualMedia":
result["virtual_media"] = rf_utils.get_multi_virtualmedia(category)
+ elif command == "GetBiosRegistries":
+ result["bios_registries"] = rf_utils.get_bios_registries()
elif category == "Chassis":
# execute only if we find Chassis resource
@@ -560,6 +597,8 @@ def main():
result["host_interfaces"] = rf_utils.get_hostinterfaces()
elif command == "GetManagerInventory":
result["manager"] = rf_utils.get_multi_manager_inventory()
+ elif command == "GetServiceIdentification":
+ result["service_id"] = rf_utils.get_service_identification(manager)
# Return data back
module.exit_json(redfish_facts=result)
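
The timeout handling above follows a common pattern for deprecating an option default that is about to change: drop the default from the argument spec so an unset option arrives as None, substitute the old value for now, and emit a deprecation warning. A minimal sketch of that pattern, assuming a generic AnsibleModule-based module (option name and values are illustrative only, not the module's exact code):

#!/usr/bin/python
# Minimal sketch of deprecating a changing option default.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            # no 'default' here, so an unset option shows up as None
            timeout=dict(type='int'),
        ),
        supports_check_mode=True,
    )

    timeout = module.params['timeout']
    if timeout is None:
        # keep the old behaviour for now, but warn that it will change
        timeout = 10
        module.deprecate(
            'The default value 10 for the timeout parameter is deprecated '
            'and will be replaced by 60',
            version='9.0.0',
            collection_name='community.general',
        )

    module.exit_json(changed=False, timeout=timeout)


if __name__ == '__main__':
    main()
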
diff --git a/ansible_collections/community/general/plugins/modules/redhat_subscription.py b/ansible_collections/community/general/plugins/modules/redhat_subscription.py
index 79b0d4b4c..d4b47d5d5 100644
--- a/ansible_collections/community/general/plugins/modules/redhat_subscription.py
+++ b/ansible_collections/community/general/plugins/modules/redhat_subscription.py
@@ -26,21 +26,21 @@ notes:
C(subscription-manager) itself gets credentials only as arguments of command line
parameters, which is I(not) secure, as they can be easily stolen by checking the
process listing on the system. Due to limitations of the D-Bus interface of C(rhsm),
- the module will I(not) use D-Bus for registation when trying either to register
- using I(token), or when specifying I(environment), or when the system is old
- (typically RHEL 6 and older).
+ the module will I(not) use D-Bus for registration when trying either to register
+ using O(token), or when specifying O(environment), or when the system is old
+ (typically RHEL 7 older than 7.4, RHEL 6, and older).
- In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
- - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
- I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
- I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
- config file and default to None.
+ - Since 2.5 values for O(server_hostname), O(server_insecure), O(rhsm_baseurl),
+ O(server_proxy_hostname), O(server_proxy_port), O(server_proxy_user) and
+ O(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
+ config file and default to V(null).
- It is possible to interact with C(subscription-manager) only as root,
so root permissions are required to successfully run this module.
- - Since community.general 6.5.0, credentials (that is, I(username) and I(password),
- I(activationkey), or I(token)) are needed only in case the the system is not registered,
- or I(force_register) is specified; this makes it possible to use the module to tweak an
- already registered system, for example attaching pools to it (using I(pool), or I(pool_ids)),
- and modifying the C(syspurpose) attributes (using I(syspurpose)).
+ - Since community.general 6.5.0, credentials (that is, O(username) and O(password),
+ O(activationkey), or O(token)) are needed only in case the system is not registered,
+ or O(force_register) is specified; this makes it possible to use the module to tweak an
+ already registered system, for example attaching pools to it (using O(pool), or O(pool_ids)),
+ and modifying the C(syspurpose) attributes (using O(syspurpose)).
requirements:
- subscription-manager
- Optionally the C(dbus) Python library; this is usually included in the OS
@@ -55,7 +55,7 @@ attributes:
options:
state:
description:
- - whether to register and subscribe (C(present)), or unregister (C(absent)) a system
+ - whether to register and subscribe (V(present)), or unregister (V(absent)) a system
choices: [ "present", "absent" ]
default: "present"
type: str
@@ -74,11 +74,11 @@ options:
version_added: 6.3.0
server_hostname:
description:
- - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server
+ - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server.
type: str
server_insecure:
description:
- - Enable or disable https server certificate verification when connecting to C(server_hostname)
+ - Enable or disable https server certificate verification when connecting to O(server_hostname).
type: str
server_prefix:
description:
@@ -104,7 +104,7 @@ options:
type: str
server_proxy_scheme:
description:
- - Specify an HTTP proxy scheme, for example C(http) or C(https).
+ - Specify an HTTP proxy scheme, for example V(http) or V(https).
type: str
version_added: 6.2.0
server_proxy_port:
@@ -122,7 +122,9 @@ options:
auto_attach:
description:
- Upon successful registration, auto-consume available subscriptions
- - Added in favor of deprecated autosubscribe in 2.5.
+ - |
+ Please note that the alias O(autosubscribe) will be removed in
+ community.general 9.0.0.
type: bool
aliases: [autosubscribe]
activationkey:
@@ -140,18 +142,26 @@ options:
pool:
description:
- |
- Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
- possible, as it is much faster. Mutually exclusive with I(pool_ids).
+ Specify a subscription pool name to consume. Regular expressions accepted.
+ Mutually exclusive with O(pool_ids).
+ - |
+ Please use O(pool_ids) instead: specifying pool IDs is much faster,
+ and it avoids matching new pools that become available for the
+ system and are not explicitly wanted. Also, this option does not
+ support quantities.
+ - |
+ This option is deprecated for the reasons mentioned above,
+ and it will be removed in community.general 10.0.0.
default: '^$'
type: str
pool_ids:
description:
- |
- Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
- A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
- or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
- C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple
- entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
+ Specify subscription pool IDs to consume. Prefer over O(pool) when possible as it is much faster.
+ A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)),
+ or as a C(dict) with the pool ID as the key, and a quantity as the value (for example
+ V(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
+ entitlements from a pool (the pool must support this). Mutually exclusive with O(pool).
default: []
type: list
elements: raw
@@ -205,8 +215,8 @@ options:
elements: str
sync:
description:
- - When this option is true, then syspurpose attributes are synchronized with
- RHSM server immediately. When this option is false, then syspurpose attributes
+ - When this option is V(true), then syspurpose attributes are synchronized with
+ RHSM server immediately. When this option is V(false), then syspurpose attributes
will be synchronized with RHSM server by rhsmcertd daemon.
type: bool
default: false
@@ -323,32 +333,12 @@ from ansible.module_utils import distro
SUBMAN_CMD = None
-class RegistrationBase(object):
+class Rhsm(object):
REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
- def __init__(self, module, username=None, password=None, token=None):
+ def __init__(self, module):
self.module = module
- self.username = username
- self.password = password
- self.token = token
-
- def configure(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def enable(self):
- # Remove any existing redhat.repo
- if isfile(self.REDHAT_REPO):
- unlink(self.REDHAT_REPO)
-
- def register(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unregister(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unsubscribe(self):
- raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
@@ -369,22 +359,15 @@ class RegistrationBase(object):
fd.close()
self.module.atomic_move(tmpfile, plugin_conf)
- def subscribe(self, **kwargs):
- raise NotImplementedError("Must be implemented by a sub-class")
-
-
-class Rhsm(RegistrationBase):
- def __init__(self, module, username=None, password=None, token=None):
- RegistrationBase.__init__(self, module, username, password, token)
- self.module = module
-
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
- RegistrationBase.enable(self)
+ # Remove any existing redhat.repo
+ if isfile(self.REDHAT_REPO):
+ unlink(self.REDHAT_REPO)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
@@ -431,6 +414,30 @@ class Rhsm(RegistrationBase):
else:
return False
+ def _has_dbus_interface(self):
+ """
+ Checks whether subscription-manager has a D-Bus interface.
+
+ :returns: bool -- whether subscription-manager has a D-Bus interface.
+ """
+
+ def str2int(s, default=0):
+ try:
+ return int(s)
+ except ValueError:
+ return default
+
+ distro_id = distro.id()
+ distro_version = tuple(str2int(p) for p in distro.version_parts())
+
+ # subscription-manager in any supported Fedora version has the interface.
+ if distro_id == 'fedora':
+ return True
+ # Any other distro: assume it is EL;
+ # the D-Bus interface was added to subscription-manager in RHEL 7.4.
+ return (distro_version[0] == 7 and distro_version[1] >= 4) or \
+ distro_version[0] >= 8
+
def _can_connect_to_dbus(self):
"""
Checks whether it is possible to connect to the system D-Bus bus.
@@ -474,7 +481,8 @@ class Rhsm(RegistrationBase):
# of rhsm, so always use the CLI in that case;
# also, since the specified environments are names, and the D-Bus APIs
# require IDs for the environments, use the CLI also in that case
- if not token and not environment and self._can_connect_to_dbus():
+ if (not token and not environment and self._has_dbus_interface() and
+ self._can_connect_to_dbus()):
self._register_using_dbus(was_registered, username, password, auto_attach,
activationkey, org_id, consumer_type,
consumer_name, consumer_id,
@@ -588,7 +596,34 @@ class Rhsm(RegistrationBase):
register_opts = {}
if consumer_type:
- register_opts['consumer_type'] = consumer_type
+ # The option for the consumer type used to be 'type' in versions
+ # of RHEL before 9 & in RHEL 9 before 9.2, and then it changed to
+ # 'consumer_type'; since the Register*() D-Bus functions reject
+ # unknown options, we have to pass the right option depending on
+ # the version -- funky.
+ def supports_option_consumer_type():
+ # subscription-manager in any supported Fedora version
+ # has the new option.
+ if distro_id == 'fedora':
+ return True
+ # Check for RHEL 9 >= 9.2, or RHEL >= 10.
+ if distro_id == 'rhel' and \
+ ((distro_version[0] == 9 and distro_version[1] >= 2) or
+ distro_version[0] >= 10):
+ return True
+ # CentOS: since the change was only done in EL 9, there is
+ # only CentOS Stream for 9, and thus we can assume it has the
+ # latest version of subscription-manager.
+ if distro_id == 'centos' and distro_version[0] >= 9:
+ return True
+ # Unknown or old distro: assume it does not support
+ # the new option.
+ return False
+
+ consumer_type_key = 'type'
+ if supports_option_consumer_type():
+ consumer_type_key = 'consumer_type'
+ register_opts[consumer_type_key] = consumer_type
if consumer_name:
register_opts['name'] = consumer_name
if consumer_id:
@@ -1056,9 +1091,6 @@ class SysPurpose(object):
def main():
- # Load RHSM configuration from file
- rhsm = Rhsm(None)
-
# Note: the default values for parameters are:
# 'type': 'str', 'default': None, 'required': False
# So there is no need to repeat these values for each parameter.
@@ -1074,11 +1106,25 @@ def main():
'server_port': {},
'rhsm_baseurl': {},
'rhsm_repo_ca_cert': {},
- 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},
+ 'auto_attach': {
+ 'type': 'bool',
+ 'aliases': ['autosubscribe'],
+ 'deprecated_aliases': [
+ {
+ 'name': 'autosubscribe',
+ 'version': '9.0.0',
+ 'collection_name': 'community.general',
+ },
+ ],
+ },
'activationkey': {'no_log': True},
'org_id': {},
'environment': {},
- 'pool': {'default': '^$'},
+ 'pool': {
+ 'default': '^$',
+ 'removed_in_version': '10.0.0',
+ 'removed_from_collection': 'community.general',
+ },
'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'},
'consumer_type': {},
'consumer_name': {},
@@ -1119,7 +1165,9 @@ def main():
msg="Interacting with subscription-manager requires root permissions ('become: true')"
)
- rhsm.module = module
+ # Load RHSM configuration from file
+ rhsm = Rhsm(module)
+
state = module.params['state']
username = module.params['username']
password = module.params['password']
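
The D-Bus gating added to redhat_subscription reduces to comparisons on the distribution ID and version tuple that the module reads through ansible.module_utils.distro (distro.id() and distro.version_parts()). A standalone sketch of the same decision logic, using plain tuples as illustrative inputs instead of the module's distro wrapper:

# Simplified sketch of the version checks used above; inputs mirror what
# distro.id() and a parsed distro.version_parts() would provide.

def has_dbus_interface(distro_id, version):
    # subscription-manager on any supported Fedora has the D-Bus interface;
    # on EL it appeared in RHEL 7.4.
    if distro_id == 'fedora':
        return True
    return (version[0] == 7 and version[1] >= 4) or version[0] >= 8


def consumer_type_key(distro_id, version):
    # The Register*() D-Bus option was renamed from 'type' to 'consumer_type';
    # EL grew the new name in 9.2, Fedora and CentOS Stream 9+ already have it.
    if distro_id == 'fedora':
        return 'consumer_type'
    if distro_id == 'rhel' and ((version[0] == 9 and version[1] >= 2) or
                                version[0] >= 10):
        return 'consumer_type'
    if distro_id == 'centos' and version[0] >= 9:
        return 'consumer_type'
    return 'type'


# Illustrative checks of the boundary cases
assert has_dbus_interface('rhel', (7, 3, 0)) is False
assert has_dbus_interface('rhel', (7, 4, 0)) is True
assert consumer_type_key('rhel', (9, 1, 0)) == 'type'
assert consumer_type_key('rhel', (9, 2, 0)) == 'consumer_type'
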
diff --git a/ansible_collections/community/general/plugins/modules/redis.py b/ansible_collections/community/general/plugins/modules/redis.py
index 1778a067e..207927cb7 100644
--- a/ansible_collections/community/general/plugins/modules/redis.py
+++ b/ansible_collections/community/general/plugins/modules/redis.py
@@ -26,9 +26,9 @@ options:
command:
description:
- The selected redis command
- - C(config) ensures a configuration setting on an instance.
- - C(flush) flushes all the instance or a specified db.
- - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).)
+ - V(config) ensures a configuration setting on an instance.
+ - V(flush) flushes all the instance or a specified db.
+ - V(replica) sets a redis instance in replica or master mode. (V(slave) is an alias for V(replica).)
choices: [ config, flush, replica, slave ]
type: str
tls:
@@ -51,7 +51,7 @@ options:
replica_mode:
description:
- The mode of the redis instance [replica command]
- - C(slave) is an alias for C(replica).
+ - V(slave) is an alias for V(replica).
default: replica
choices: [ master, replica, slave ]
type: str
@@ -75,7 +75,7 @@ options:
value:
description:
- A redis config value. When memory size is needed, it is possible
- to specify it in the usal form of 1KB, 2M, 400MB where the base is 1024.
+ to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
Units are case insensitive i.e. 1m = 1mb = 1M = 1MB.
type: str
diff --git a/ansible_collections/community/general/plugins/modules/redis_data.py b/ansible_collections/community/general/plugins/modules/redis_data.py
index c0c8dcc9a..fe5cc07ef 100644
--- a/ansible_collections/community/general/plugins/modules/redis_data.py
+++ b/ansible_collections/community/general/plugins/modules/redis_data.py
@@ -121,12 +121,12 @@ EXAMPLES = '''
RETURN = '''
old_value:
description: Value of key before setting.
- returned: on_success if state is C(present) and key exists in database.
+ returned: on success if O(state=present) and key exists in database.
type: str
sample: 'old_value_of_key'
value:
description: Value key was set to.
- returned: on success if state is C(present).
+ returned: on success if O(state=present).
type: str
sample: 'new_value_of_key'
msg:
diff --git a/ansible_collections/community/general/plugins/modules/redis_data_incr.py b/ansible_collections/community/general/plugins/modules/redis_data_incr.py
index f927fb11f..b359e0cb9 100644
--- a/ansible_collections/community/general/plugins/modules/redis_data_incr.py
+++ b/ansible_collections/community/general/plugins/modules/redis_data_incr.py
@@ -16,15 +16,15 @@ version_added: 4.0.0
description:
- Increment integers or float keys in Redis database and get new value.
- Default increment for all keys is 1. For specific increments use the
- I(increment_int) and I(increment_float) options.
+ O(increment_int) and O(increment_float) options.
author: "Andreas Botzner (@paginabianca)"
attributes:
check_mode:
support: partial
details:
- - For C(check_mode) to work, the specified I(redis_user) needs permission to
+ - For C(check_mode) to work, the specified O(login_user) needs permission to
run the C(GET) command on the key, otherwise the module will fail.
- - When using I(check_mode) the module will try to calculate the value that
+ - When using C(check_mode) the module will try to calculate the value that
Redis would return. If the key is not present, 0.0 is used as value.
diff_mode:
support: none
diff --git a/ansible_collections/community/general/plugins/modules/redis_info.py b/ansible_collections/community/general/plugins/modules/redis_info.py
index b9900a7ca..f352d53d7 100644
--- a/ansible_collections/community/general/plugins/modules/redis_info.py
+++ b/ansible_collections/community/general/plugins/modules/redis_info.py
@@ -17,30 +17,21 @@ version_added: '0.2.0'
description:
- Gathers information and statistics about Redis servers.
extends_documentation_fragment:
+- community.general.redis
- community.general.attributes
- community.general.attributes.info_module
options:
- login_host:
- description:
- - The host running the database.
- type: str
- default: localhost
- login_port:
- description:
- - The port to connect to.
- type: int
- default: 6379
- login_password:
- description:
- - The password used to authenticate with, when authentication is enabled for the Redis server.
- type: str
-notes:
-- Requires the redis-py Python package on the remote host. You can
- install it with pip (C(pip install redis)) or with a package manager.
- U(https://github.com/andymccurdy/redis-py)
+ login_user:
+ version_added: 7.5.0
+ validate_certs:
+ version_added: 7.5.0
+ tls:
+ default: false
+ version_added: 7.5.0
+ ca_certs:
+ version_added: 7.5.0
seealso:
- module: community.general.redis
-requirements: [ redis ]
author: "Pavlo Bashynskyi (@levonet)"
'''
@@ -199,8 +190,10 @@ except ImportError:
REDIS_IMP_ERR = traceback.format_exc()
HAS_REDIS_PACKAGE = False
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.redis import (
+ fail_imports, redis_auth_argument_spec, redis_auth_params)
def redis_client(**client_params):
@@ -210,23 +203,16 @@ def redis_client(**client_params):
# Module execution.
def main():
module = AnsibleModule(
- argument_spec=dict(
- login_host=dict(type='str', default='localhost'),
- login_port=dict(type='int', default=6379),
- login_password=dict(type='str', no_log=True),
- ),
+ argument_spec=redis_auth_argument_spec(tls_default=False),
supports_check_mode=True,
)
- if not HAS_REDIS_PACKAGE:
- module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+ fail_imports(module, module.params['tls'])
- login_host = module.params['login_host']
- login_port = module.params['login_port']
- login_password = module.params['login_password']
+ redis_params = redis_auth_params(module)
# Connect and check
- client = redis_client(host=login_host, port=login_port, password=login_password)
+ client = redis_client(**redis_params)
try:
client.ping()
except Exception as e:
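
The redis_info refactoring pulls its connection options from shared helpers in the collection's redis module_utils (redis_auth_argument_spec, redis_auth_params, fail_imports), whose bodies are not part of this diff. Purely as a hypothetical sketch, with the option names taken from the documented fragment and the mapping to redis-py keyword arguments assumed, such helpers could look roughly like this; the real implementation may differ:

# Hypothetical sketch only; the actual module_utils/redis.py may differ.

def redis_auth_argument_spec(tls_default=True):
    return dict(
        login_host=dict(type='str', default='localhost'),
        login_port=dict(type='int', default=6379),
        login_user=dict(type='str'),
        login_password=dict(type='str', no_log=True),
        tls=dict(type='bool', default=tls_default),
        validate_certs=dict(type='bool', default=True),
        ca_certs=dict(type='str'),
    )


def redis_auth_params(module):
    # Translate module options into keyword arguments for redis.Redis().
    params = {
        'host': module.params['login_host'],
        'port': module.params['login_port'],
        'username': module.params['login_user'],
        'password': module.params['login_password'],
        'ssl': module.params['tls'],
    }
    if module.params['tls']:
        params['ssl_ca_certs'] = module.params['ca_certs']
        if not module.params['validate_certs']:
            # disable certificate verification
            params['ssl_cert_reqs'] = None
    return params
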
diff --git a/ansible_collections/community/general/plugins/modules/rhevm.py b/ansible_collections/community/general/plugins/modules/rhevm.py
index c129a2df5..7f2300997 100644
--- a/ansible_collections/community/general/plugins/modules/rhevm.py
+++ b/ansible_collections/community/general/plugins/modules/rhevm.py
@@ -141,14 +141,14 @@ options:
default: true
cd_drive:
description:
- - The CD you wish to have mounted on the VM when I(state = 'CD').
+ - The CD you wish to have mounted on the VM when O(state=cd).
type: str
timeout:
description:
- The timeout you wish to define for power actions.
- - When I(state = 'up').
- - When I(state = 'down').
- - When I(state = 'restarted').
+ - When O(state=up).
+ - When O(state=down).
+ - When O(state=restarted).
type: int
'''
diff --git a/ansible_collections/community/general/plugins/modules/rhn_channel.py b/ansible_collections/community/general/plugins/modules/rhn_channel.py
index e544af51e..b69bb0c68 100644
--- a/ansible_collections/community/general/plugins/modules/rhn_channel.py
+++ b/ansible_collections/community/general/plugins/modules/rhn_channel.py
@@ -60,13 +60,23 @@ options:
type: str
validate_certs:
description:
- - If C(False), SSL certificates will not be validated.
- - This should only set to C(False) when used on self controlled sites
+ - If V(false), SSL certificates will not be validated.
+ - This should only be set to V(false) when used on self controlled sites
using self-signed certificates, and you are absolutely sure that nobody
can modify traffic between the module and the site.
type: bool
default: true
version_added: '0.2.0'
+deprecated:
+ removed_in: 10.0.0
+ why: |
+ RHN hosted at redhat.com was discontinued years ago, and Spacewalk 5
+ (which uses RHN) has been EOL since May 31st, 2020; while this module could
+ work on Uyuni / SUSE Manager (fork of Spacewalk 5), we have not heard
+ about anyone using it in those setups.
+ alternative: |
+ Contact the community.general maintainers to report the usage of this
+ module, and potentially step up to maintain it.
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/rhn_register.py b/ansible_collections/community/general/plugins/modules/rhn_register.py
index 1fe9297d2..cd1b708e4 100644
--- a/ansible_collections/community/general/plugins/modules/rhn_register.py
+++ b/ansible_collections/community/general/plugins/modules/rhn_register.py
@@ -32,7 +32,7 @@ attributes:
options:
state:
description:
- - Whether to register (C(present)), or unregister (C(absent)) a system.
+ - Whether to register (V(present)), or unregister (V(absent)) a system.
type: str
choices: [ absent, present ]
default: present
@@ -47,7 +47,7 @@ options:
server_url:
description:
- Specify an alternative Red Hat Network server URL.
- - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date).
+ - The default is the current value of C(serverURL) from C(/etc/sysconfig/rhn/up2date).
type: str
activationkey:
description:
@@ -80,14 +80,24 @@ options:
default: []
enable_eus:
description:
- - If C(false), extended update support will be requested.
+ - If V(false), extended update support will be requested.
type: bool
default: false
nopackages:
description:
- - If C(true), the registered node will not upload its installed packages information to Satellite server.
+ - If V(true), the registered node will not upload its installed packages information to Satellite server.
type: bool
default: false
+deprecated:
+ removed_in: 10.0.0
+ why: |
+ RHN hosted at redhat.com was discontinued years ago, and Spacewalk 5
+ (which uses RHN) has been EOL since May 31st, 2020; while this module could
+ work on Uyuni / SUSE Manager (fork of Spacewalk 5), we have not heard
+ about anyone using it in those setups.
+ alternative: |
+ Contact the community.general maintainers to report the usage of this
+ module, and potentially step up to maintain it.
'''
EXAMPLES = r'''
diff --git a/ansible_collections/community/general/plugins/modules/rhsm_release.py b/ansible_collections/community/general/plugins/modules/rhsm_release.py
index 6ac4da6e4..8c74ca819 100644
--- a/ansible_collections/community/general/plugins/modules/rhsm_release.py
+++ b/ansible_collections/community/general/plugins/modules/rhsm_release.py
@@ -16,7 +16,7 @@ description:
- Sets or unsets the release version used by RHSM repositories.
notes:
- This module will fail on an unregistered system.
- Use the C(redhat_subscription) module to register a system
+ Use the M(community.general.redhat_subscription) module to register a system
prior to setting the RHSM release.
- It is possible to interact with C(subscription-manager) only as root,
so root permissions are required to successfully run this module.
@@ -33,7 +33,7 @@ options:
release:
description:
- RHSM release version to use.
- - To unset either pass C(null) for this option, or omit this option.
+ - To unset either pass V(null) for this option, or omit this option.
type: str
author:
- Sean Myers (@seandst)
@@ -77,9 +77,9 @@ def _sm_release(module, *args):
# pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes
# "subscription-manager release --set 0.1"
sm_bin = module.get_bin_path('subscription-manager', required=True)
- cmd = '{0} release {1}'.format(sm_bin, " ".join(args))
+ cmd = [sm_bin, 'release'] + list(args)
# delegate nonzero rc handling to run_command
- return module.run_command(cmd, check_rc=True)
+ return module.run_command(cmd, check_rc=True, expand_user_and_vars=False)
def get_release(module):
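
The change to _sm_release switches module.run_command from a hand-joined string to an argv list, so arguments reach subscription-manager without any extra splitting, and expand_user_and_vars=False keeps characters such as '~' and '$' literal. A short sketch of the list form, assuming an existing AnsibleModule instance named module:

# Sketch only: argv-list invocation of subscription-manager via run_command.
# 'module' is assumed to be an existing AnsibleModule instance.

def set_release(module, release):
    sm_bin = module.get_bin_path('subscription-manager', required=True)
    # old style (string): cmd = '{0} release --set {1}'.format(sm_bin, release)
    # new style (list): no re-splitting, no user/variable expansion of arguments
    cmd = [sm_bin, 'release', '--set', release]
    return module.run_command(cmd, check_rc=True, expand_user_and_vars=False)
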
diff --git a/ansible_collections/community/general/plugins/modules/rhsm_repository.py b/ansible_collections/community/general/plugins/modules/rhsm_repository.py
index eea6e3857..e58389102 100644
--- a/ansible_collections/community/general/plugins/modules/rhsm_repository.py
+++ b/ansible_collections/community/general/plugins/modules/rhsm_repository.py
@@ -18,7 +18,7 @@ description:
author: Giovanni Sciortino (@giovannisciortino)
notes:
- In order to manage RHSM repositories the system must be already registered
- to RHSM manually or using the Ansible C(redhat_subscription) module.
+ to RHSM manually or using the Ansible M(community.general.redhat_subscription) module.
- It is possible to interact with C(subscription-manager) only as root,
so root permissions are required to successfully run this module.
@@ -36,6 +36,10 @@ options:
description:
- If state is equal to present or disabled, indicates the desired
repository state.
+ - |
+ Please note that V(present) and V(absent) are deprecated, and will be
+ removed in community.general 10.0.0; please use V(enabled) and
+ V(disabled) instead.
choices: [present, enabled, absent, disabled]
default: "enabled"
type: str
@@ -49,8 +53,8 @@ options:
elements: str
purge:
description:
- - Disable all currently enabled repositories that are not not specified in C(name).
- Only set this to C(True) if passing in a list of repositories to the C(name) field.
+ - Disable all currently enabled repositories that are not specified in O(name).
+ Only set this to V(true) if passing in a list of repositories to the O(name) field.
Using this with C(loop) will most likely not have the desired result.
type: bool
default: false
@@ -86,93 +90,88 @@ repositories:
type: list
'''
-import re
import os
from fnmatch import fnmatch
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
-def run_subscription_manager(module, arguments):
- # Execute subscription-manager with arguments and manage common errors
- rhsm_bin = module.get_bin_path('subscription-manager')
- if not rhsm_bin:
- module.fail_json(msg='The executable file subscription-manager was not found in PATH')
-
- lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
- rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
-
- if rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
- module.fail_json(msg='This system has no repositories available through subscriptions')
- elif rc == 1:
- module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
- else:
- return rc, out, err
-
-
-def get_repository_list(module, list_parameter):
- # Generate RHSM repository list and return a list of dict
- if list_parameter == 'list_enabled':
- rhsm_arguments = ['repos', '--list-enabled']
- elif list_parameter == 'list_disabled':
- rhsm_arguments = ['repos', '--list-disabled']
- elif list_parameter == 'list':
- rhsm_arguments = ['repos', '--list']
- rc, out, err = run_subscription_manager(module, rhsm_arguments)
-
- skip_lines = [
- '+----------------------------------------------------------+',
- ' Available Repositories in /etc/yum.repos.d/redhat.repo'
- ]
- repo_id_re = re.compile(r'Repo ID:\s+(.*)')
- repo_name_re = re.compile(r'Repo Name:\s+(.*)')
- repo_url_re = re.compile(r'Repo URL:\s+(.*)')
- repo_enabled_re = re.compile(r'Enabled:\s+(.*)')
-
- repo_id = ''
- repo_name = ''
- repo_url = ''
- repo_enabled = ''
-
- repo_result = []
- for line in out.splitlines():
- if line == '' or line in skip_lines:
- continue
-
- repo_id_match = repo_id_re.match(line)
- if repo_id_match:
- repo_id = repo_id_match.group(1)
- continue
-
- repo_name_match = repo_name_re.match(line)
- if repo_name_match:
- repo_name = repo_name_match.group(1)
- continue
-
- repo_url_match = repo_url_re.match(line)
- if repo_url_match:
- repo_url = repo_url_match.group(1)
- continue
-
- repo_enabled_match = repo_enabled_re.match(line)
- if repo_enabled_match:
- repo_enabled = repo_enabled_match.group(1)
-
- repo = {
- "id": repo_id,
- "name": repo_name,
- "url": repo_url,
- "enabled": True if repo_enabled == '1' else False
- }
-
- repo_result.append(repo)
-
- return repo_result
-
-
-def repository_modify(module, state, name, purge=False):
+class Rhsm(object):
+ def __init__(self, module):
+ self.module = module
+ self.rhsm_bin = self.module.get_bin_path('subscription-manager', required=True)
+ self.rhsm_kwargs = {
+ 'environ_update': dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'),
+ 'expand_user_and_vars': False,
+ 'use_unsafe_shell': False,
+ }
+
+ def run_repos(self, arguments):
+ """
+ Execute `subscription-manager repos` with arguments and manage common errors
+ """
+ rc, out, err = self.module.run_command(
+ [self.rhsm_bin, 'repos'] + arguments,
+ **self.rhsm_kwargs
+ )
+
+ if rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
+ self.module.fail_json(msg='This system has no repositories available through subscriptions')
+ elif rc == 1:
+ self.module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
+ else:
+ return rc, out, err
+
+ def list_repositories(self):
+ """
+ Generate RHSM repository list and return a list of dict
+ """
+ rc, out, err = self.run_repos(['--list'])
+
+ repo_id = ''
+ repo_name = ''
+ repo_url = ''
+ repo_enabled = ''
+
+ repo_result = []
+ for line in out.splitlines():
+ # ignore lines that are:
+ # - empty
+ # - "+---------[...]" -- i.e. header
+ # - " Available Repositories [...]" -- i.e. header
+ if line == '' or line[0] == '+' or line[0] == ' ':
+ continue
+
+ if line.startswith('Repo ID: '):
+ repo_id = line[9:].lstrip()
+ continue
+
+ if line.startswith('Repo Name: '):
+ repo_name = line[11:].lstrip()
+ continue
+
+ if line.startswith('Repo URL: '):
+ repo_url = line[10:].lstrip()
+ continue
+
+ if line.startswith('Enabled: '):
+ repo_enabled = line[9:].lstrip()
+
+ repo = {
+ "id": repo_id,
+ "name": repo_name,
+ "url": repo_url,
+ "enabled": True if repo_enabled == '1' else False
+ }
+
+ repo_result.append(repo)
+
+ return repo_result
+
+
+def repository_modify(module, rhsm, state, name, purge=False):
name = set(name)
- current_repo_list = get_repository_list(module, 'list')
+ current_repo_list = rhsm.list_repositories()
updated_repo_list = deepcopy(current_repo_list)
matched_existing_repo = {}
for repoid in name:
@@ -187,7 +186,7 @@ def repository_modify(module, state, name, purge=False):
results = []
diff_before = ""
diff_after = ""
- rhsm_arguments = ['repos']
+ rhsm_arguments = []
for repoid in matched_existing_repo:
if len(matched_existing_repo[repoid]) == 0:
@@ -222,6 +221,9 @@ def repository_modify(module, state, name, purge=False):
diff_after.join("Repository '{repoid}' is disabled for this system\n".format(repoid=repoid))
results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
rhsm_arguments.extend(['--disable', repoid])
+ for updated_repo in updated_repo_list:
+ if updated_repo['id'] in difference:
+ updated_repo['enabled'] = False
diff = {'before': diff_before,
'after': diff_after,
@@ -229,7 +231,7 @@ def repository_modify(module, state, name, purge=False):
'after_header': "RHSM repositories"}
if not module.check_mode and changed:
- rc, out, err = run_subscription_manager(module, rhsm_arguments)
+ rc, out, err = rhsm.run_repos(rhsm_arguments)
results = out.splitlines()
module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
@@ -249,11 +251,21 @@ def main():
msg="Interacting with subscription-manager requires root permissions ('become: true')"
)
+ rhsm = Rhsm(module)
+
name = module.params['name']
state = module.params['state']
purge = module.params['purge']
- repository_modify(module, state, name, purge)
+ if state in ['present', 'absent']:
+ replacement = 'enabled' if state == 'present' else 'disabled'
+ module.deprecate(
+ 'state=%s is deprecated; please use state=%s instead' % (state, replacement),
+ version='10.0.0',
+ collection_name='community.general',
+ )
+
+ repository_modify(module, rhsm, state, name, purge)
if __name__ == '__main__':
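
The new list_repositories() parser matches fixed field prefixes instead of regular expressions and skips header lines by their first character. A standalone sketch of the same approach, run against illustrative (not captured) subscription-manager output:

# Standalone sketch of the prefix-based parsing used in list_repositories();
# SAMPLE is illustrative output, not captured from a real system.

SAMPLE = """\
+----------------------------------------------------------+
    Available Repositories in /etc/yum.repos.d/redhat.repo
+----------------------------------------------------------+
Repo ID:   rhel-9-baseos-rpms
Repo Name: Red Hat Enterprise Linux 9 BaseOS (RPMs)
Repo URL:  https://cdn.example.com/content/baseos
Enabled:   1

Repo ID:   rhel-9-appstream-rpms
Repo Name: Red Hat Enterprise Linux 9 AppStream (RPMs)
Repo URL:  https://cdn.example.com/content/appstream
Enabled:   0
"""


def parse_repos(out):
    repos, current = [], {}
    for line in out.splitlines():
        # skip empty lines, the '+----' separators and the indented header
        if line == '' or line[0] == '+' or line[0] == ' ':
            continue
        if line.startswith('Repo ID: '):
            current = {'id': line[9:].lstrip()}
        elif line.startswith('Repo Name: '):
            current['name'] = line[11:].lstrip()
        elif line.startswith('Repo URL: '):
            current['url'] = line[10:].lstrip()
        elif line.startswith('Enabled: '):
            current['enabled'] = line[9:].lstrip() == '1'
            repos.append(current)
    return repos


if __name__ == '__main__':
    for repo in parse_repos(SAMPLE):
        print(repo)
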
diff --git a/ansible_collections/community/general/plugins/modules/riak.py b/ansible_collections/community/general/plugins/modules/riak.py
index 024e5424d..fe295d2d6 100644
--- a/ansible_collections/community/general/plugins/modules/riak.py
+++ b/ansible_collections/community/general/plugins/modules/riak.py
@@ -64,7 +64,7 @@ options:
type: str
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/rocketchat.py b/ansible_collections/community/general/plugins/modules/rocketchat.py
index 23d6d529e..473f0150a 100644
--- a/ansible_collections/community/general/plugins/modules/rocketchat.py
+++ b/ansible_collections/community/general/plugins/modules/rocketchat.py
@@ -29,8 +29,8 @@ options:
domain:
type: str
description:
- - The domain for your environment without protocol. (i.e.
- C(example.com) or C(chat.example.com))
+ - The domain for your environment without protocol. (For example
+ V(example.com) or V(chat.example.com).)
required: true
token:
type: str
@@ -42,7 +42,7 @@ options:
protocol:
type: str
description:
- - Specify the protocol used to send notification messages before the webhook url. (i.e. http or https)
+ - Specify the protocol used to send notification messages before the webhook URL (that is, V(http) or V(https)).
default: https
choices:
- 'http'
@@ -54,7 +54,7 @@ options:
channel:
type: str
description:
- - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+ - Channel to send the message to. If absent, the message goes to the channel selected for the O(token)
specified during the creation of webhook.
username:
type: str
@@ -70,18 +70,20 @@ options:
type: str
description:
- Emoji for the message sender. The representation for the available emojis can be
- got from Rocket Chat. (for example :thumbsup:) (if I(icon_emoji) is set, I(icon_url) will not be used)
+ obtained from Rocket Chat.
+ - For example V(:thumbsup:).
+ - If O(icon_emoji) is set, O(icon_url) will not be used.
link_names:
type: int
description:
- - Automatically create links for channels and usernames in I(msg).
+ - Automatically create links for channels and usernames in O(msg).
default: 1
choices:
- 1
- 0
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/rollbar_deployment.py b/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
index 314e65bc6..4bce9ab98 100644
--- a/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
+++ b/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
@@ -63,7 +63,7 @@ options:
default: 'https://api.rollbar.com/api/1/deploy/'
validate_certs:
description:
- - If C(false), SSL certificates for the target url will not be validated.
+ - If V(false), SSL certificates for the target url will not be validated.
This should only be used on personally controlled sites using
self-signed certificates.
required: false
diff --git a/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py b/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py
index 52219cd1b..826c33f2d 100644
--- a/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py
+++ b/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py
@@ -35,8 +35,8 @@ options:
state:
description:
- State of the overlay package.
- - C(present) simply ensures that a desired package is installed.
- - C(absent) removes the specified package.
+ - V(present) simply ensures that a desired package is installed.
+ - V(absent) removes the specified package.
choices: [ 'absent', 'present' ]
default: 'present'
type: str
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py b/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
index 77026e633..8f21a3268 100644
--- a/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
+++ b/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
@@ -108,11 +108,11 @@ rundeck_response:
returned: failed
type: str
before:
- description: Dictionary containing ACL policy informations before modification.
+ description: Dictionary containing ACL policy information before modification.
returned: success
type: dict
after:
- description: Dictionary containing ACL policy informations after modification.
+ description: Dictionary containing ACL policy information after modification.
returned: success
type: dict
'''
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_job_run.py b/ansible_collections/community/general/plugins/modules/rundeck_job_run.py
index 894f1bb6f..2ef144740 100644
--- a/ansible_collections/community/general/plugins/modules/rundeck_job_run.py
+++ b/ansible_collections/community/general/plugins/modules/rundeck_job_run.py
@@ -42,7 +42,7 @@ options:
type: str
description:
- Schedule the job execution to run at specific date and time.
- - ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00).
+ - ISO-8601 date and time format like V(2021-10-05T15:45:00-03:00).
loglevel:
type: str
description:
@@ -64,12 +64,12 @@ options:
description:
- Job execution wait timeout in seconds.
- If the timeout is reached, the job will be aborted.
- - Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check.
+ - Keep in mind that there is a sleep based on O(wait_execution_delay) after each job status check.
default: 120
abort_on_timeout:
type: bool
description:
- - Send a job abort request if exceeded the I(wait_execution_timeout) specified.
+ - Send a job abort request if the specified O(wait_execution_timeout) is exceeded.
default: false
extends_documentation_fragment:
- community.general.rundeck
diff --git a/ansible_collections/community/general/plugins/modules/runit.py b/ansible_collections/community/general/plugins/modules/runit.py
index 7c5882af8..2f1609ca6 100644
--- a/ansible_collections/community/general/plugins/modules/runit.py
+++ b/ansible_collections/community/general/plugins/modules/runit.py
@@ -31,11 +31,11 @@ options:
required: true
state:
description:
- - C(started)/C(stopped) are idempotent actions that will not run
- commands unless necessary. C(restarted) will always bounce the
- service (sv restart) and C(killed) will always bounce the service (sv force-stop).
- C(reloaded) will send a HUP (sv reload).
- C(once) will run a normally downed sv once (sv once), not really
+ - V(started)/V(stopped) are idempotent actions that will not run
+ commands unless necessary. V(restarted) will always bounce the
+ service (sv restart) and V(killed) will always bounce the service (sv force-stop).
+ V(reloaded) will send a HUP (sv reload).
+ V(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
type: str
choices: [ killed, once, reloaded, restarted, started, stopped ]
diff --git a/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py b/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py
deleted file mode 100644
index 14b347e44..000000000
--- a/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py
+++ /dev/null
@@ -1,348 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2021, Rainer Leber <rainerleber@gmail.com>
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: sap_task_list_execute
-short_description: Perform SAP Task list execution
-version_added: "3.5.0"
-description:
- - The C(sap_task_list_execute) module depends on C(pyrfc) Python library (version 2.4.0 and upwards).
- Depending on distribution you are using, you may need to install additional packages to
- have these available.
- - Tasks in the task list which requires manual activities will be confirmed automatically.
- - This module will use the RFC package C(STC_TM_API).
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-
-requirements:
- - pyrfc >= 2.4.0
- - xmltodict
-
-options:
- conn_username:
- description: The required username for the SAP system.
- required: true
- type: str
- conn_password:
- description: The required password for the SAP system.
- required: true
- type: str
- host:
- description: The required host for the SAP system. Can be either an FQDN or IP Address.
- required: true
- type: str
- sysnr:
- description:
- - The system number of the SAP system.
- - You must quote the value to ensure retaining the leading zeros.
- default: '00'
- type: str
- client:
- description:
- - The client number to connect to.
- - You must quote the value to ensure retaining the leading zeros.
- default: '000'
- type: str
- task_to_execute:
- description: The task list which will be executed.
- required: true
- type: str
- task_parameters:
- description:
- - The tasks and the parameters for execution.
- - If the task list do not need any parameters. This could be empty.
- - If only specific tasks from the task list should be executed.
- The tasks even when no parameter is needed must be provided.
- Alongside with the module parameter I(task_skip=true).
- type: list
- elements: dict
- suboptions:
- TASKNAME:
- description: The name of the task in the task list.
- type: str
- required: true
- FIELDNAME:
- description: The name of the field of the task.
- type: str
- VALUE:
- description: The value which have to be set.
- type: raw
- task_settings:
- description:
- - Setting for the execution of the task list. This can be the following as in TCODE SE80 described.
- Check Mode C(CHECKRUN), Background Processing Active C(BATCH) (this is the default value),
- Asynchronous Execution C(ASYNC), Trace Mode C(TRACE), Server Name C(BATCH_TARGET).
- default: ['BATCH']
- type: list
- elements: str
- task_skip:
- description:
- - If this parameter is C(true) not defined tasks in I(task_parameters) are skipped.
- - This could be the case when only certain tasks should run from the task list.
- default: false
- type: bool
-
-author:
- - Rainer Leber (@rainerleber)
-'''
-
-EXAMPLES = r'''
-# Pass in a message
-- name: Test task execution
- community.general.sap_task_list_execute:
- conn_username: DDIC
- conn_password: Passwd1234
- host: 10.1.8.10
- sysnr: '01'
- client: '000'
- task_to_execute: SAP_BASIS_SSL_CHECK
- task_settings: batch
-
-- name: Pass in input parameters
- community.general.sap_task_list_execute:
- conn_username: DDIC
- conn_password: Passwd1234
- host: 10.1.8.10
- sysnr: '00'
- client: '000'
- task_to_execute: SAP_BASIS_SSL_CHECK
- task_parameters :
- - { 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', 'FIELDNAME': 'P_OPT2', 'VALUE': 'X' }
- - TASKNAME: CL_STCT_CHECK_SEC_CRYPTO
- FIELDNAME: P_OPT3
- VALUE: X
- task_settings: batch
-
-# Exported environement variables.
-- name: Hint if module will fail with error message like ImportError libsapnwrfc.so...
- community.general.sap_task_list_execute:
- conn_username: DDIC
- conn_password: Passwd1234
- host: 10.1.8.10
- sysnr: '00'
- client: '000'
- task_to_execute: SAP_BASIS_SSL_CHECK
- task_settings: batch
- environment:
- SAPNWRFC_HOME: /usr/local/sap/nwrfcsdk
- LD_LIBRARY_PATH: /usr/local/sap/nwrfcsdk/lib
-'''
-
-RETURN = r'''
-msg:
- description: A small execution description.
- type: str
- returned: always
- sample: 'Successful'
-out:
- description: A complete description of the executed tasks. If this is available.
- type: list
- elements: dict
- returned: on success
- sample: [...,{
- "LOG": {
- "STCTM_S_LOG": [
- {
- "ACTIVITY": "U_CONFIG",
- "ACTIVITY_DESCR": "Configuration changed",
- "DETAILS": null,
- "EXEC_ID": "20210728184903.815739",
- "FIELD": null,
- "ID": "STC_TASK",
- "LOG_MSG_NO": "000000",
- "LOG_NO": null,
- "MESSAGE": "For radiobutton group ICM too many options are set; choose only one option",
- "MESSAGE_V1": "ICM",
- "MESSAGE_V2": null,
- "MESSAGE_V3": null,
- "MESSAGE_V4": null,
- "NUMBER": "048",
- "PARAMETER": null,
- "PERIOD": "M",
- "PERIOD_DESCR": "Maintenance",
- "ROW": "0",
- "SRC_LINE": "170",
- "SRC_OBJECT": "CL_STCTM_REPORT_UI IF_STCTM_UI_TASK~SET_PARAMETERS",
- "SYSTEM": null,
- "TIMESTMP": "20210728184903",
- "TSTPNM": "DDIC",
- "TYPE": "E"
- },...
- ]}}]
-'''
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-import traceback
-try:
- from pyrfc import Connection
-except ImportError:
- HAS_PYRFC_LIBRARY = False
- PYRFC_LIBRARY_IMPORT_ERROR = traceback.format_exc()
-else:
- HAS_PYRFC_LIBRARY = True
- PYRFC_LIBRARY_IMPORT_ERROR = None
-try:
- import xmltodict
-except ImportError:
- HAS_XMLTODICT_LIBRARY = False
- XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc()
-else:
- HAS_XMLTODICT_LIBRARY = True
- XMLTODICT_LIBRARY_IMPORT_ERROR = None
-
-
-def call_rfc_method(connection, method_name, kwargs):
- # PyRFC call function
- return connection.call(method_name, **kwargs)
-
-
-def process_exec_settings(task_settings):
- # processes task settings to objects
- exec_settings = {}
- for settings in task_settings:
- temp_dict = {settings.upper(): 'X'}
- for key, value in temp_dict.items():
- exec_settings[key] = value
- return exec_settings
-
-
-def xml_to_dict(xml_raw):
- try:
- xml_parsed = xmltodict.parse(xml_raw, dict_constructor=dict)
- xml_dict = xml_parsed['asx:abap']['asx:values']['SESSION']['TASKLIST']
- except KeyError:
- xml_dict = "No logs available."
- return xml_dict
-
-
-def run_module():
-
- params_spec = dict(
- TASKNAME=dict(type='str', required=True),
- FIELDNAME=dict(type='str'),
- VALUE=dict(type='raw'),
- )
-
- # define available arguments/parameters a user can pass to the module
- module = AnsibleModule(
- argument_spec=dict(
- # values for connection
- conn_username=dict(type='str', required=True),
- conn_password=dict(type='str', required=True, no_log=True),
- host=dict(type='str', required=True),
- sysnr=dict(type='str', default="00"),
- client=dict(type='str', default="000"),
- # values for execution tasks
- task_to_execute=dict(type='str', required=True),
- task_parameters=dict(type='list', elements='dict', options=params_spec),
- task_settings=dict(type='list', elements='str', default=['BATCH']),
- task_skip=dict(type='bool', default=False),
- ),
- supports_check_mode=False,
- )
- result = dict(changed=False, msg='', out={})
-
- params = module.params
-
- username = params['conn_username'].upper()
- password = params['conn_password']
- host = params['host']
- sysnr = params['sysnr']
- client = params['client']
-
- task_parameters = params['task_parameters']
- task_to_execute = params['task_to_execute']
- task_settings = params['task_settings']
- task_skip = params['task_skip']
-
- if not HAS_PYRFC_LIBRARY:
- module.fail_json(
- msg=missing_required_lib('pyrfc'),
- exception=PYRFC_LIBRARY_IMPORT_ERROR)
-
- if not HAS_XMLTODICT_LIBRARY:
- module.fail_json(
- msg=missing_required_lib('xmltodict'),
- exception=XMLTODICT_LIBRARY_IMPORT_ERROR)
-
- # basic RFC connection with pyrfc
- try:
- conn = Connection(user=username, passwd=password, ashost=host, sysnr=sysnr, client=client)
- except Exception as err:
- result['error'] = str(err)
- result['msg'] = 'Something went wrong connecting to the SAP system.'
- module.fail_json(**result)
-
- try:
- raw_params = call_rfc_method(conn, 'STC_TM_SCENARIO_GET_PARAMETERS',
- {'I_SCENARIO_ID': task_to_execute})
- except Exception as err:
- result['error'] = str(err)
- result['msg'] = 'The task list does not exsist.'
- module.fail_json(**result)
- exec_settings = process_exec_settings(task_settings)
- # initialize session task
- session_init = call_rfc_method(conn, 'STC_TM_SESSION_BEGIN',
- {'I_SCENARIO_ID': task_to_execute,
- 'I_INIT_ONLY': 'X'})
- # Confirm Tasks which requires manual activities from Task List Run
- for task in raw_params['ET_PARAMETER']:
- call_rfc_method(conn, 'STC_TM_TASK_CONFIRM',
- {'I_SESSION_ID': session_init['E_SESSION_ID'],
- 'I_TASKNAME': task['TASKNAME']})
- if task_skip:
- for task in raw_params['ET_PARAMETER']:
- call_rfc_method(conn, 'STC_TM_TASK_SKIP',
- {'I_SESSION_ID': session_init['E_SESSION_ID'],
- 'I_TASKNAME': task['TASKNAME'], 'I_SKIP_DEP_TASKS': 'X'})
- # unskip defined tasks and set parameters
- if task_parameters is not None:
- for task in task_parameters:
- call_rfc_method(conn, 'STC_TM_TASK_UNSKIP',
- {'I_SESSION_ID': session_init['E_SESSION_ID'],
- 'I_TASKNAME': task['TASKNAME'], 'I_UNSKIP_DEP_TASKS': 'X'})
-
- call_rfc_method(conn, 'STC_TM_SESSION_SET_PARAMETERS',
- {'I_SESSION_ID': session_init['E_SESSION_ID'],
- 'IT_PARAMETER': task_parameters})
- # start the task
- try:
- session_start = call_rfc_method(conn, 'STC_TM_SESSION_RESUME',
- {'I_SESSION_ID': session_init['E_SESSION_ID'],
- 'IS_EXEC_SETTINGS': exec_settings})
- except Exception as err:
- result['error'] = str(err)
- result['msg'] = 'Something went wrong. See error.'
- module.fail_json(**result)
- # get task logs because the execution may successfully but the tasks shows errors or warnings
- # returned value is ABAPXML https://help.sap.com/doc/abapdocu_755_index_htm/7.55/en-US/abenabap_xslt_asxml_general.htm
- session_log = call_rfc_method(conn, 'STC_TM_SESSION_GET_LOG',
- {'I_SESSION_ID': session_init['E_SESSION_ID']})
-
- task_list = xml_to_dict(session_log['E_LOG'])
-
- result['changed'] = True
- result['msg'] = session_start['E_STATUS_DESCR']
- result['out'] = task_list
-
- module.exit_json(**result)
-
-
-def main():
- run_module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/sapcar_extract.py b/ansible_collections/community/general/plugins/modules/sapcar_extract.py
deleted file mode 100644
index badd466e1..000000000
--- a/ansible_collections/community/general/plugins/modules/sapcar_extract.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2021, Rainer Leber <rainerleber@gmail.com>
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: sapcar_extract
-short_description: Manages SAP SAPCAR archives
-version_added: "3.2.0"
-description:
- - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling
- information back into Ansible.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: partial
- details:
- - Always returns C(changed=true) in check mode.
- diff_mode:
- support: none
-options:
- path:
- description: The path to the SAR/CAR file.
- type: path
- required: true
- dest:
- description:
- - The destination where SAPCAR extracts the SAR file. Missing folders will be created.
- If this parameter is not provided it will unpack in the same folder as the SAR file.
- type: path
- binary_path:
- description:
- - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR).
- If this parameter is not provided the module will look in C(PATH).
- type: path
- signature:
- description:
- - If C(true) the signature will be extracted.
- default: false
- type: bool
- security_library:
- description:
- - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrytp.so), for signature operations.
- type: path
- manifest:
- description:
- - The name of the manifest.
- default: "SIGNATURE.SMF"
- type: str
- remove:
- description:
- - If C(true) the SAR/CAR file will be removed. B(This should be used with caution!)
- default: false
- type: bool
-author:
- - Rainer Leber (@RainerLeber)
-'''
-
-EXAMPLES = """
-- name: Extract SAR file
- community.general.sapcar_extract:
- path: "~/source/hana.sar"
-
-- name: Extract SAR file with destination
- community.general.sapcar_extract:
- path: "~/source/hana.sar"
- dest: "~/test/"
-
-- name: Extract SAR file with destination and download from webserver can be a fileshare as well
- community.general.sapcar_extract:
- path: "~/source/hana.sar"
- dest: "~/dest/"
- binary_path: "https://myserver/SAPCAR"
-
-- name: Extract SAR file and delete SAR after extract
- community.general.sapcar_extract:
- path: "~/source/hana.sar"
- remove: true
-
-- name: Extract SAR file with manifest
- community.general.sapcar_extract:
- path: "~/source/hana.sar"
- signature: true
-
-- name: Extract SAR file with manifest and rename it
- community.general.sapcar_extract:
- path: "~/source/hana.sar"
- manifest: "MyNewSignature.SMF"
- signature: true
-"""
-
-import os
-from tempfile import NamedTemporaryFile
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.urls import open_url
-from ansible.module_utils.common.text.converters import to_native
-
-
-def get_list_of_files(dir_name):
- # create a list of file and directories
- # names in the given directory
- list_of_file = os.listdir(dir_name)
- allFiles = list()
- # Iterate over all the entries
- for entry in list_of_file:
- # Create full path
- fullPath = os.path.join(dir_name, entry)
- # If entry is a directory then get the list of files in this directory
- if os.path.isdir(fullPath):
- allFiles = allFiles + [fullPath]
- allFiles = allFiles + get_list_of_files(fullPath)
- else:
- allFiles.append(fullPath)
- return allFiles
-
-
-def download_SAPCAR(binary_path, module):
- bin_path = None
- # download sapcar binary if url is provided otherwise path is returned
- if binary_path is not None:
- if binary_path.startswith('https://') or binary_path.startswith('http://'):
- random_file = NamedTemporaryFile(delete=False)
- with open_url(binary_path) as response:
- with random_file as out_file:
- data = response.read()
- out_file.write(data)
- os.chmod(out_file.name, 0o700)
- bin_path = out_file.name
- module.add_cleanup_file(bin_path)
- else:
- bin_path = binary_path
- return bin_path
-
-
-def check_if_present(command, path, dest, signature, manifest, module):
- # manipuliating output from SAR file for compare with already extracted files
- iter_command = [command, '-tvf', path]
- sar_out = module.run_command(iter_command)[1]
- sar_raw = sar_out.split("\n")[1:]
- if dest[-1] != "/":
- dest = dest + "/"
- sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x]
- # remove any SIGNATURE.SMF from list because it will not unpacked if signature is false
- if not signature:
- sar_files = [item for item in sar_files if '.SMF' not in item]
- # if signature is renamed manipulate files in list of sar file for compare.
- if manifest != "SIGNATURE.SMF":
- sar_files = [item for item in sar_files if '.SMF' not in item]
- sar_files = sar_files + [manifest]
- # get extracted files if present
- files_extracted = get_list_of_files(dest)
- # compare extracted files with files in sar file
- present = all(elem in files_extracted for elem in sar_files)
- return present
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- path=dict(type='path', required=True),
- dest=dict(type='path'),
- binary_path=dict(type='path'),
- signature=dict(type='bool', default=False),
- security_library=dict(type='path'),
- manifest=dict(type='str', default="SIGNATURE.SMF"),
- remove=dict(type='bool', default=False),
- ),
- supports_check_mode=True,
- )
- rc, out, err = [0, "", ""]
- params = module.params
- check_mode = module.check_mode
-
- path = params['path']
- dest = params['dest']
- signature = params['signature']
- security_library = params['security_library']
- manifest = params['manifest']
- remove = params['remove']
-
- bin_path = download_SAPCAR(params['binary_path'], module)
-
- if dest is None:
- dest_head_tail = os.path.split(path)
- dest = dest_head_tail[0] + '/'
- else:
- if not os.path.exists(dest):
- os.makedirs(dest, 0o755)
-
- if bin_path is not None:
- command = [module.get_bin_path(bin_path, required=True)]
- else:
- try:
- command = [module.get_bin_path('sapcar', required=True)]
- except Exception as e:
- module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}'
- .format(bin_path, to_native(e)))
-
- present = check_if_present(command[0], path, dest, signature, manifest, module)
-
- if not present:
- command.extend(['-xvf', path, '-R', dest])
- if security_library:
- command.extend(['-L', security_library])
- if signature:
- command.extend(['-manifest', manifest])
- if not check_mode:
- (rc, out, err) = module.run_command(command, check_rc=True)
- changed = True
- else:
- changed = False
- out = "already unpacked"
-
- if remove:
- os.remove(path)
-
- module.exit_json(changed=changed, message=rc, stdout=out,
- stderr=err, command=' '.join(command))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_compute.py b/ansible_collections/community/general/plugins/modules/scaleway_compute.py
index 9bd821807..7f85bc668 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_compute.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_compute.py
@@ -37,8 +37,8 @@ options:
description:
- Manage public IP on a Scaleway server
- Could be Scaleway IP address UUID
- - C(dynamic) Means that IP is destroyed at the same time the host is destroyed
- - C(absent) Means no public IP at all
+ - V(dynamic) means that the IP is destroyed at the same time the host is destroyed
+ - V(absent) means no public IP at all
default: absent
enable_ipv6:
@@ -62,13 +62,13 @@ options:
type: str
description:
- Organization identifier.
- - Exactly one of I(project) and I(organization) must be specified.
+ - Exactly one of O(project) and O(organization) must be specified.
project:
type: str
description:
- Project identifier.
- - Exactly one of I(project) and I(organization) must be specified.
+ - Exactly one of O(project) and O(organization) must be specified.
version_added: 4.3.0
state:
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py b/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py
index 9a9d9adde..b41720be5 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py
@@ -49,7 +49,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(par1)).
+ - Scaleway region to use (for example V(par1)).
required: true
choices:
- ams1
@@ -98,7 +98,7 @@ EXAMPLES = '''
RETURN = '''
scaleway_compute_private_network:
description: Information on the VPC.
- returned: success when I(state=present)
+ returned: success when O(state=present)
type: dict
sample:
{
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container.py b/ansible_collections/community/general/plugins/modules/scaleway_container.py
index 19ffae419..8764a7634 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_container.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container.py
@@ -51,7 +51,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(fr-par)).
+ - Scaleway region to use (for example V(fr-par)).
required: true
choices:
- fr-par
@@ -90,7 +90,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the container namespace.
- - Updating thoses values will not output a C(changed) state in Ansible.
+ - Updating those values will not output a C(changed) state in Ansible.
- Injected in container at runtime.
type: dict
default: {}
@@ -109,7 +109,7 @@ options:
privacy:
description:
- Privacy policies define whether a container can be executed anonymously.
- - Choose C(public) to enable anonymous execution, or C(private) to protect your container with an authentication mechanism provided by the Scaleway API.
+ - Choose V(public) to enable anonymous execution, or V(private) to protect your container with an authentication mechanism provided by the Scaleway API.
type: str
default: public
choices:
@@ -174,7 +174,7 @@ EXAMPLES = '''
RETURN = '''
container:
description: The container information.
- returned: when I(state=present)
+ returned: when O(state=present)
type: dict
sample:
cpu_limit: 140
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py
index fb01b8672..fd56a7d43 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py
@@ -51,7 +51,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(fr-par)).
+ - Scaleway region to use (for example V(fr-par)).
required: true
choices:
- fr-par
@@ -80,7 +80,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the container namespace.
- - Updating thoses values will not output a C(changed) state in Ansible.
+ - Updating those values will not output a C(changed) state in Ansible.
- Injected in containers at runtime.
type: dict
default: {}
@@ -110,7 +110,7 @@ EXAMPLES = '''
RETURN = '''
container_namespace:
description: The container namespace information.
- returned: when I(state=present)
+ returned: when O(state=present)
type: dict
sample:
description: ""
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py b/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py
index 5eee571ec..6344a7ae6 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py
@@ -34,7 +34,7 @@ options:
state:
type: str
description:
- - Indicate desired state of the container regitry.
+ - Indicate desired state of the container registry.
default: present
choices:
- present
@@ -49,7 +49,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(fr-par)).
+ - Scaleway region to use (for example V(fr-par)).
required: true
choices:
- fr-par
@@ -72,7 +72,7 @@ options:
type: str
description:
- Default visibility policy.
- - Everyone will be able to pull images from a C(public) registry.
+ - Everyone will be able to pull images from a V(public) registry.
choices:
- public
- private
@@ -99,7 +99,7 @@ EXAMPLES = '''
RETURN = '''
container_registry:
description: The container registry information.
- returned: when I(state=present)
+ returned: when O(state=present)
type: dict
sample:
created_at: "2022-10-14T09:51:07.949716Z"
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py b/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
index edc9f6cab..592ec0b7f 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
@@ -32,10 +32,10 @@ options:
state:
description:
- Indicate desired state of the database backup.
- - C(present) creates a backup.
- - C(absent) deletes the backup.
- - C(exported) creates a download link for the backup.
- - C(restored) restores the backup to a new database.
+ - V(present) creates a backup.
+ - V(absent) deletes the backup.
+ - V(exported) creates a download link for the backup.
+ - V(restored) restores the backup to a new database.
type: str
default: present
choices:
@@ -46,7 +46,7 @@ options:
region:
description:
- - Scaleway region to use (for example C(fr-par)).
+ - Scaleway region to use (for example V(fr-par)).
type: str
required: true
choices:
@@ -57,37 +57,37 @@ options:
id:
description:
- UUID used to identify the database backup.
- - Required for C(absent), C(exported) and C(restored) states.
+ - Required for V(absent), V(exported) and V(restored) states.
type: str
name:
description:
- Name used to identify the database backup.
- - Required for C(present) state.
- - Ignored when I(state=absent), I(state=exported) or I(state=restored).
+ - Required for V(present) state.
+ - Ignored when O(state=absent), O(state=exported) or O(state=restored).
type: str
required: false
database_name:
description:
- Name used to identify the database.
- - Required for C(present) and C(restored) states.
- - Ignored when I(state=absent) or I(state=exported).
+ - Required for V(present) and V(restored) states.
+ - Ignored when O(state=absent) or O(state=exported).
type: str
required: false
instance_id:
description:
- UUID of the instance associated to the database backup.
- - Required for C(present) and C(restored) states.
- - Ignored when I(state=absent) or I(state=exported).
+ - Required for V(present) and V(restored) states.
+ - Ignored when O(state=absent) or O(state=exported).
type: str
required: false
expires_at:
description:
- Expiration datetime of the database backup (ISO 8601 format).
- - Ignored when I(state=absent), I(state=exported) or I(state=restored).
+ - Ignored when O(state=absent), O(state=exported) or O(state=restored).
type: str
required: false
@@ -145,7 +145,7 @@ EXAMPLES = '''
RETURN = '''
metadata:
description: Backup metadata.
- returned: when I(state=present), I(state=exported) or I(state=restored)
+ returned: when O(state=present), O(state=exported), or O(state=restored)
type: dict
sample: {
"metadata": {
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function.py b/ansible_collections/community/general/plugins/modules/scaleway_function.py
index 378545866..eb121cd9c 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_function.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function.py
@@ -51,7 +51,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(fr-par)).
+ - Scaleway region to use (for example V(fr-par)).
required: true
choices:
- fr-par
@@ -90,7 +90,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the function.
- - Updating thoses values will not output a C(changed) state in Ansible.
+ - Updating those values will not output a C(changed) state in Ansible.
- Injected in function at runtime.
type: dict
default: {}
@@ -121,7 +121,7 @@ options:
privacy:
description:
- Privacy policies define whether a function can be executed anonymously.
- - Choose C(public) to enable anonymous execution, or C(private) to protect your function with an authentication mechanism provided by the Scaleway API.
+ - Choose V(public) to enable anonymous execution, or V(private) to protect your function with an authentication mechanism provided by the Scaleway API.
type: str
default: public
choices:
@@ -160,7 +160,7 @@ EXAMPLES = '''
RETURN = '''
function:
description: The function information.
- returned: when I(state=present)
+ returned: when O(state=present)
type: dict
sample:
cpu_limit: 140
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py
index f6310b35b..0ea31e9bc 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py
@@ -51,7 +51,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(fr-par)).
+ - Scaleway region to use (for example V(fr-par)).
required: true
choices:
- fr-par
@@ -80,7 +80,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the function namespace.
- - Updating thoses values will not output a C(changed) state in Ansible.
+ - Updating those values will not output a C(changed) state in Ansible.
- Injected in functions at runtime.
type: dict
default: {}
@@ -110,7 +110,7 @@ EXAMPLES = '''
RETURN = '''
function_namespace:
description: The function namespace information.
- returned: when I(state=present)
+ returned: when O(state=present)
type: dict
sample:
description: ""
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_ip.py b/ansible_collections/community/general/plugins/modules/scaleway_ip.py
index cf8e2e601..1c9042742 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_ip.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_ip.py
@@ -94,8 +94,8 @@ EXAMPLES = '''
RETURN = '''
data:
- description: This is only present when I(state=present).
- returned: when I(state=present)
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
type: dict
sample: {
"ips": [
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_private_network.py b/ansible_collections/community/general/plugins/modules/scaleway_private_network.py
index 33fb7381c..0cc9b900f 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_private_network.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_private_network.py
@@ -48,7 +48,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(par1)).
+ - Scaleway region to use (for example V(par1)).
required: true
choices:
- ams1
@@ -93,7 +93,7 @@ EXAMPLES = '''
RETURN = '''
scaleway_private_network:
description: Information on the VPC.
- returned: success when I(state=present)
+ returned: success when O(state=present)
type: dict
sample:
{
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
index 5523da41c..c09bc34ba 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
@@ -45,7 +45,7 @@ options:
region:
description:
- - Scaleway region to use (for example C(par1)).
+ - Scaleway region to use (for example V(par1)).
type: str
required: true
choices:
@@ -110,8 +110,8 @@ EXAMPLES = '''
RETURN = '''
data:
- description: This is only present when I(state=present).
- returned: when I(state=present)
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
type: dict
sample: {
"scaleway_security_group": {
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
index 136631d03..9cbb2eb57 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
@@ -22,8 +22,6 @@ description:
extends_documentation_fragment:
- community.general.scaleway
- community.general.attributes
-requirements:
- - ipaddress
attributes:
check_mode:
@@ -44,7 +42,7 @@ options:
region:
type: str
description:
- - Scaleway region to use (for example C(par1)).
+ - Scaleway region to use (for example V(par1)).
required: true
choices:
- ams1
@@ -119,8 +117,8 @@ EXAMPLES = '''
RETURN = '''
data:
- description: This is only present when I(state=present).
- returned: when I(state=present)
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
type: dict
sample: {
"scaleway_security_group_rule": {
@@ -137,19 +135,8 @@ data:
}
'''
-import traceback
-
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-try:
- from ipaddress import ip_network # noqa: F401, pylint: disable=unused-import
-except ImportError:
- IPADDRESS_IMP_ERR = traceback.format_exc()
- HAS_IPADDRESS = False
-else:
- IPADDRESS_IMP_ERR = None
- HAS_IPADDRESS = True
+from ansible.module_utils.basic import AnsibleModule
def get_sgr_from_api(security_group_rules, security_group_rule):
@@ -272,8 +259,6 @@ def main():
argument_spec=argument_spec,
supports_check_mode=True,
)
- if not HAS_IPADDRESS:
- module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
core(module)
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py b/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
index a39e57aa3..5647f9cd0 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
@@ -72,8 +72,8 @@ EXAMPLES = '''
RETURN = '''
data:
- description: This is only present when I(state=present).
- returned: when I(state=present)
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
type: dict
sample: {
"ssh_public_keys": [
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_volume.py b/ansible_collections/community/general/plugins/modules/scaleway_volume.py
index 2ff09da54..46d72288e 100644
--- a/ansible_collections/community/general/plugins/modules/scaleway_volume.py
+++ b/ansible_collections/community/general/plugins/modules/scaleway_volume.py
@@ -96,8 +96,8 @@ EXAMPLES = '''
RETURN = '''
data:
- description: This is only present when I(state=present).
- returned: when I(state=present)
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
type: dict
sample: {
"volume": {
diff --git a/ansible_collections/community/general/plugins/modules/sefcontext.py b/ansible_collections/community/general/plugins/modules/sefcontext.py
index b2fb36767..19c128fa7 100644
--- a/ansible_collections/community/general/plugins/modules/sefcontext.py
+++ b/ansible_collections/community/general/plugins/modules/sefcontext.py
@@ -36,43 +36,43 @@ options:
description:
- The file type that should have SELinux contexts applied.
- "The following file type options are available:"
- - C(a) for all files,
- - C(b) for block devices,
- - C(c) for character devices,
- - C(d) for directories,
- - C(f) for regular files,
- - C(l) for symbolic links,
- - C(p) for named pipes,
- - C(s) for socket files.
+ - V(a) for all files,
+ - V(b) for block devices,
+ - V(c) for character devices,
+ - V(d) for directories,
+ - V(f) for regular files,
+ - V(l) for symbolic links,
+ - V(p) for named pipes,
+ - V(s) for socket files.
type: str
choices: [ a, b, c, d, f, l, p, s ]
default: a
setype:
description:
- - SELinux type for the specified I(target).
+ - SELinux type for the specified O(target).
type: str
substitute:
description:
- - Path to use to substitute file context(s) for the specified I(target). The context labeling for the I(target) subtree is made equivalent to this path.
+ - Path to use to substitute file context(s) for the specified O(target). The context labeling for the O(target) subtree is made equivalent to this path.
- This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools.
version_added: 6.4.0
type: str
aliases: [ equal ]
seuser:
description:
- - SELinux user for the specified I(target).
- - Defaults to C(system_u) for new file contexts and to existing value when modifying file contexts.
+ - SELinux user for the specified O(target).
+ - Defaults to V(system_u) for new file contexts and to existing value when modifying file contexts.
type: str
selevel:
description:
- - SELinux range for the specified I(target).
- - Defaults to C(s0) for new file contexts and to existing value when modifying file contexts.
+ - SELinux range for the specified O(target).
+ - Defaults to V(s0) for new file contexts and to existing value when modifying file contexts.
type: str
aliases: [ serange ]
state:
description:
- - Whether the SELinux file context must be C(absent) or C(present).
- - Specifying C(absent) without either I(setype) or I(substitute) deletes both SELinux type or path substitution mappings that match I(target).
+ - Whether the SELinux file context must be V(absent) or V(present).
+ - Specifying V(absent) without either O(setype) or O(substitute) deletes both SELinux type and path substitution mappings that match O(target).
type: str
choices: [ absent, present ]
default: present
@@ -89,8 +89,8 @@ options:
default: false
notes:
- The changes are persistent across reboots.
-- I(setype) and I(substitute) are mutually exclusive.
-- If I(state=present) then one of I(setype) or I(substitute) is mandatory.
+- O(setype) and O(substitute) are mutually exclusive.
+- If O(state=present) then one of O(setype) or O(substitute) is mandatory.
- The M(community.general.sefcontext) module does not modify existing files to the new
SELinux context(s), so it is advisable to first create the SELinux
file contexts before creating files, or run C(restorecon) manually
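The sefcontext hunks above list the O(ftype) letters and the rules tying O(setype), O(substitute) and O(state) together. As a quick reference, here is a hedged Python sketch of those constraints; the names are made up for illustration and do not come from the module.

# Reference only: mirrors the documentation above, not the module's code.
SEFCONTEXT_FTYPES = {
    'a': 'all files', 'b': 'block device', 'c': 'character device',
    'd': 'directory', 'f': 'regular file', 'l': 'symbolic link',
    'p': 'named pipe', 's': 'socket file',
}

def validate_sefcontext(state, setype=None, substitute=None):
    """setype and substitute are mutually exclusive; state=present needs one of them."""
    if setype and substitute:
        raise ValueError('setype and substitute are mutually exclusive')
    if state == 'present' and not (setype or substitute):
        raise ValueError('state=present requires setype or substitute')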
diff --git a/ansible_collections/community/general/plugins/modules/selinux_permissive.py b/ansible_collections/community/general/plugins/modules/selinux_permissive.py
index 7249a01b8..80439e1de 100644
--- a/ansible_collections/community/general/plugins/modules/selinux_permissive.py
+++ b/ansible_collections/community/general/plugins/modules/selinux_permissive.py
@@ -37,7 +37,7 @@ options:
no_reload:
description:
- Disable reloading of the SELinux policy after making change to a domain's permissive setting.
- - The default is C(false), which causes policy to be reloaded when a domain changes state.
+ - The default is V(false), which causes policy to be reloaded when a domain changes state.
- Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6."
type: bool
default: false
diff --git a/ansible_collections/community/general/plugins/modules/sendgrid.py b/ansible_collections/community/general/plugins/modules/sendgrid.py
index 2c0cc9a5b..b4f6b6eaf 100644
--- a/ansible_collections/community/general/plugins/modules/sendgrid.py
+++ b/ansible_collections/community/general/plugins/modules/sendgrid.py
@@ -24,7 +24,6 @@ notes:
account."
- "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
you must pip install sendgrid"
- - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)"
requirements:
- sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported)
extends_documentation_fragment:
@@ -39,12 +38,12 @@ options:
type: str
description:
- Username for logging into the SendGrid account.
- - Since 2.2 it is only required if I(api_key) is not supplied.
+ - It is only required if O(api_key) is not supplied.
password:
type: str
description:
- Password that corresponds to the username.
- - Since 2.2 it is only required if I(api_key) is not supplied.
+ - It is only required if O(api_key) is not supplied.
from_address:
type: str
description:
diff --git a/ansible_collections/community/general/plugins/modules/sensu_check.py b/ansible_collections/community/general/plugins/modules/sensu_check.py
index 1ac2316a8..1430d6a6c 100644
--- a/ansible_collections/community/general/plugins/modules/sensu_check.py
+++ b/ansible_collections/community/general/plugins/modules/sensu_check.py
@@ -16,7 +16,7 @@ short_description: Manage Sensu checks
description:
- Manage the checks that should be run on a machine by I(Sensu).
- Most options do not have a default and will not be added to the check definition unless specified.
- - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module,
+ - All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module,
- they are simply specified for your convenience.
extends_documentation_fragment:
- community.general.attributes
@@ -42,8 +42,8 @@ options:
type: str
description:
- Path to the json file of the check to be added/removed.
- - Will be created if it does not exist (unless I(state=absent)).
- - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ - Will be created if it does not exist (unless O(state=absent)).
+ - The parent folders need to exist when O(state=present); otherwise, an error will be thrown.
default: /etc/sensu/conf.d/checks.json
backup:
description:
@@ -54,7 +54,7 @@ options:
command:
type: str
description:
- - Path to the sensu check to run (not required when I(state=absent))
+ - Path to the sensu check to run (not required when O(state=absent))
handlers:
type: list
elements: str
@@ -82,7 +82,7 @@ options:
handle:
description:
- Whether the check should be handled or not
- - Default is C(false).
+ - Default is V(false).
type: bool
subdue_begin:
type: str
@@ -105,14 +105,14 @@ options:
standalone:
description:
- Whether the check should be scheduled by the sensu client or server
- - This option obviates the need for specifying the I(subscribers) option
- - Default is C(false).
+ - This option obviates the need for specifying the O(subscribers) option
+ - Default is V(false).
type: bool
publish:
description:
- Whether the check should be scheduled at all.
- You can still issue it via the sensu api
- - Default is C(false).
+ - Default is V(false).
type: bool
occurrences:
type: int
@@ -127,7 +127,7 @@ options:
description:
- Classifies the check as an aggregate check,
- making it available via the aggregate API
- - Default is C(false).
+ - Default is V(false).
type: bool
low_flap_threshold:
type: int
diff --git a/ansible_collections/community/general/plugins/modules/sensu_client.py b/ansible_collections/community/general/plugins/modules/sensu_client.py
index 2e0bd12ee..eca0804b0 100644
--- a/ansible_collections/community/general/plugins/modules/sensu_client.py
+++ b/ansible_collections/community/general/plugins/modules/sensu_client.py
@@ -77,7 +77,7 @@ options:
deregister:
description:
- If a deregistration event should be created upon Sensu client process stop.
- - Default is C(false).
+ - Default is V(false).
type: bool
deregistration:
type: dict
diff --git a/ansible_collections/community/general/plugins/modules/serverless.py b/ansible_collections/community/general/plugins/modules/serverless.py
index 67d673d4d..8aa9396d6 100644
--- a/ansible_collections/community/general/plugins/modules/serverless.py
+++ b/ansible_collections/community/general/plugins/modules/serverless.py
@@ -46,13 +46,13 @@ options:
region:
description:
- AWS region to deploy the service to.
- - This parameter defaults to C(us-east-1).
+ - This parameter defaults to V(us-east-1).
type: str
default: ''
deploy:
description:
- Whether or not to deploy artifacts after building them.
- - When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
+ - When this option is V(false), all the functions will be built, but no stack update will be run to send them out.
- This is mostly useful for generating artifacts to be stored/deployed elsewhere.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/shutdown.py b/ansible_collections/community/general/plugins/modules/shutdown.py
index 5d66fad16..d8108425e 100644
--- a/ansible_collections/community/general/plugins/modules/shutdown.py
+++ b/ansible_collections/community/general/plugins/modules/shutdown.py
@@ -12,8 +12,10 @@ DOCUMENTATION = r'''
module: shutdown
short_description: Shut down a machine
notes:
- - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use I(search_paths)
+ - E(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use O(search_paths)
to specify locations to search if the default paths do not work.
+ - The O(msg) and O(delay) options are not supported when a shutdown command is not found in O(search_paths); instead,
+ the module will attempt to shut down the system by calling C(systemctl shutdown).
description:
- Shut downs a machine.
version_added: "1.1.0"
@@ -45,7 +47,7 @@ options:
search_paths:
description:
- Paths to search on the remote machine for the C(shutdown) command.
- - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored in the remote node when searching for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. E(PATH) is ignored on the remote node when searching for the C(shutdown) command.
type: list
elements: path
default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
@@ -74,7 +76,7 @@ EXAMPLES = r'''
RETURN = r'''
shutdown:
- description: C(true) if the machine has been shut down.
+ description: V(true) if the machine has been shut down.
returned: always
type: bool
sample: true
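The new note in the shutdown documentation above describes a two-step behaviour: look for a C(shutdown) binary in O(search_paths), and fall back to C(systemctl shutdown) (where O(msg) and O(delay) cannot be applied) when none is found. A minimal sketch of that decision, with hypothetical helper names and an assumed C(shutdown -h +minutes message) invocation, not the plugin's real implementation:

# Sketch only; helper names and the exact shutdown arguments are assumptions.
import os

def find_shutdown_command(search_paths=('/sbin', '/usr/sbin', '/usr/local/sbin')):
    """Return the first executable 'shutdown' found in search_paths, else None."""
    for directory in search_paths:
        candidate = os.path.join(directory, 'shutdown')
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None

def build_shutdown_invocation(delay_min=0, msg='Shut down initiated by Ansible'):
    binary = find_shutdown_command()
    if binary is None:
        # No shutdown binary found: msg/delay cannot be passed through.
        return ['systemctl', 'shutdown']
    return [binary, '-h', '+%d' % delay_min, msg]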
diff --git a/ansible_collections/community/general/plugins/modules/simpleinit_msb.py b/ansible_collections/community/general/plugins/modules/simpleinit_msb.py
new file mode 100644
index 000000000..92738471c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/simpleinit_msb.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016-2023, Vlad Glagolev <scm@vaygr.net>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: simpleinit_msb
+short_description: Manage services on Source Mage GNU/Linux
+version_added: 7.5.0
+description:
+ - Controls services on remote hosts using C(simpleinit-msb).
+notes:
+ - This module needs ansible-core 2.15.5 or newer. Older versions have a broken and insufficient daemonize functionality.
+author: "Vlad Glagolev (@vaygr)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name of the service.
+ required: true
+ aliases: ['service']
+ state:
+ type: str
+ required: false
+ choices: [ running, started, stopped, restarted, reloaded ]
+ description:
+ - V(started)/V(stopped) are idempotent actions that will not run
+ commands unless necessary. V(restarted) will always bounce the
+ service. V(reloaded) will always reload.
+ - At least one of O(state) and O(enabled) is required.
+ - Note that V(reloaded) will start the
+ service if it is not already started, even if your chosen init
+ system would not normally do so.
+ enabled:
+ type: bool
+ required: false
+ description:
+ - Whether the service should start on boot.
+ - At least one of O(state) and O(enabled) is required.
+'''
+
+EXAMPLES = '''
+- name: Example action to start service httpd, if not running
+ community.general.simpleinit_msb:
+ name: httpd
+ state: started
+
+- name: Example action to stop service httpd, if running
+ community.general.simpleinit_msb:
+ name: httpd
+ state: stopped
+
+- name: Example action to restart service httpd, in all cases
+ community.general.simpleinit_msb:
+ name: httpd
+ state: restarted
+
+- name: Example action to reload service httpd, in all cases
+ community.general.simpleinit_msb:
+ name: httpd
+ state: reloaded
+
+- name: Example action to enable service httpd, and not touch the running state
+ community.general.simpleinit_msb:
+ name: httpd
+ enabled: true
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import daemonize
+
+
+class SimpleinitMSB(object):
+ """
+ Main simpleinit-msb service manipulation class
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+ self.enable = module.params['enabled']
+ self.changed = False
+ self.running = None
+ self.action = None
+ self.telinit_cmd = None
+ self.svc_change = False
+
+ def execute_command(self, cmd, daemon=False):
+ if not daemon:
+ return self.module.run_command(cmd)
+ else:
+ return daemonize(self.module, cmd)
+
+ def check_service_changed(self):
+ if self.state and self.running is None:
+ self.module.fail_json(msg="failed determining service state, possible typo of service name?")
+ # Find out if state has changed
+ if not self.running and self.state in ["started", "running", "reloaded"]:
+ self.svc_change = True
+ elif self.running and self.state in ["stopped", "reloaded"]:
+ self.svc_change = True
+ elif self.state == "restarted":
+ self.svc_change = True
+ if self.module.check_mode and self.svc_change:
+ self.module.exit_json(changed=True, msg='service state changed')
+
+ def modify_service_state(self):
+ # Only do something if state will change
+ if self.svc_change:
+ # Control service
+ if self.state in ['started', 'running']:
+ self.action = "start"
+ elif not self.running and self.state == 'reloaded':
+ self.action = "start"
+ elif self.state == 'stopped':
+ self.action = "stop"
+ elif self.state == 'reloaded':
+ self.action = "reload"
+ elif self.state == 'restarted':
+ self.action = "restart"
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='changing service state')
+
+ return self.service_control()
+ else:
+ # If nothing needs to change just say all is well
+ rc = 0
+ err = ''
+ out = ''
+ return rc, out, err
+
+ def get_service_tools(self):
+ paths = ['/sbin', '/usr/sbin', '/bin', '/usr/bin']
+ binaries = ['telinit']
+ location = dict()
+
+ for binary in binaries:
+ location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
+
+ if location.get('telinit', False) and os.path.exists("/etc/init.d/smgl_init"):
+ self.telinit_cmd = location['telinit']
+
+ if self.telinit_cmd is None:
+ self.module.fail_json(msg='cannot find telinit script for simpleinit-msb, aborting...')
+
+ def get_service_status(self):
+ self.action = "status"
+ rc, status_stdout, status_stderr = self.service_control()
+
+ if self.running is None and status_stdout.count('\n') <= 1:
+ cleanout = status_stdout.lower().replace(self.name.lower(), '')
+
+ if "is not running" in cleanout:
+ self.running = False
+ elif "is running" in cleanout:
+ self.running = True
+
+ return self.running
+
+ def service_enable(self):
+ # Check if the service is already enabled/disabled
+ if not self.enable ^ self.service_enabled():
+ return
+
+ action = "boot" + ("enable" if self.enable else "disable")
+
+ (rc, out, err) = self.execute_command("%s %s %s" % (self.telinit_cmd, action, self.name))
+
+ self.changed = True
+
+ for line in err.splitlines():
+ if self.enable and line.find('already enabled') != -1:
+ self.changed = False
+ break
+ if not self.enable and line.find('already disabled') != -1:
+ self.changed = False
+ break
+
+ if not self.changed:
+ return
+
+ return (rc, out, err)
+
+ def service_enabled(self):
+ self.service_exists()
+
+ (rc, out, err) = self.execute_command("%s %sd" % (self.telinit_cmd, self.enable))
+
+ service_enabled = False if self.enable else True
+
+ rex = re.compile(r'^%s$' % self.name)
+
+ for line in out.splitlines():
+ if rex.match(line):
+ service_enabled = True if self.enable else False
+ break
+
+ return service_enabled
+
+ def service_exists(self):
+ (rc, out, err) = self.execute_command("%s list" % self.telinit_cmd)
+
+ service_exists = False
+
+ rex = re.compile(r'^\w+\s+%s$' % self.name)
+
+ for line in out.splitlines():
+ if rex.match(line):
+ service_exists = True
+ break
+
+ if not service_exists:
+ self.module.fail_json(msg='telinit could not find the requested service: %s' % self.name)
+
+ def service_control(self):
+ self.service_exists()
+
+ svc_cmd = "%s run %s" % (self.telinit_cmd, self.name)
+
+ rc_state, stdout, stderr = self.execute_command("%s %s" % (svc_cmd, self.action), daemon=True)
+
+ return (rc_state, stdout, stderr)
+
+
+def build_module():
+ return AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['service']),
+ state=dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+
+def main():
+ module = build_module()
+
+ service = SimpleinitMSB(module)
+
+ rc = 0
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = service.name
+
+ # Find service management tools
+ service.get_service_tools()
+
+ # Enable/disable service startup at boot if requested
+ if service.module.params['enabled'] is not None:
+ service.service_enable()
+ result['enabled'] = service.enable
+
+ if module.params['state'] is None:
+ # Not changing the running state, so bail out now.
+ result['changed'] = service.changed
+ module.exit_json(**result)
+
+ result['state'] = service.state
+
+ service.get_service_status()
+
+ # Calculate if request will change service state
+ service.check_service_changed()
+
+ # Modify service state if necessary
+ (rc, out, err) = service.modify_service_state()
+
+ if rc != 0:
+ if err:
+ module.fail_json(msg=err)
+ else:
+ module.fail_json(msg=out)
+
+ result['changed'] = service.changed | service.svc_change
+ if service.module.params['enabled'] is not None:
+ result['enabled'] = service.module.params['enabled']
+
+ if not service.module.params['state']:
+ status = service.get_service_status()
+ if status is None:
+ result['state'] = 'absent'
+ elif status is False:
+ result['state'] = 'started'
+ else:
+ result['state'] = 'stopped'
+ else:
+ # as we may have just bounced the service the service command may not
+ # report accurate state at this moment so just show what we ran
+ if service.module.params['state'] in ['started', 'restarted', 'running', 'reloaded']:
+ result['state'] = 'started'
+ else:
+ result['state'] = 'stopped'
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
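The get_service_status() method in the new module above classifies a service by scanning the C(telinit) status output for "is running" / "is not running". A standalone, hedged rendition of that check follows; the sample output strings are assumptions about what simpleinit-msb prints, not captured output.

# Illustration of the status classification used above; sample strings assumed.
def parse_status(name, status_stdout):
    """Return True (running), False (not running) or None (undetermined)."""
    if status_stdout.count('\n') > 1:
        return None
    cleanout = status_stdout.lower().replace(name.lower(), '')
    if 'is not running' in cleanout:
        return False
    if 'is running' in cleanout:
        return True
    return None

print(parse_status('httpd', 'httpd is running\n'))       # True
print(parse_status('httpd', 'httpd is not running\n'))   # False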
diff --git a/ansible_collections/community/general/plugins/modules/sl_vm.py b/ansible_collections/community/general/plugins/modules/sl_vm.py
index 94055d1d2..1604ffc11 100644
--- a/ansible_collections/community/general/plugins/modules/sl_vm.py
+++ b/ansible_collections/community/general/plugins/modules/sl_vm.py
@@ -158,7 +158,7 @@ options:
state:
description:
- Create, or cancel a virtual instance.
- - Specify C(present) for create, C(absent) to cancel.
+ - Specify V(present) for create, V(absent) to cancel.
choices: [ absent, present ]
default: present
type: str
@@ -173,7 +173,6 @@ options:
default: 600
type: int
requirements:
- - python >= 2.6
- softlayer >= 4.1.1
author:
- Matt Colton (@mcltn)
diff --git a/ansible_collections/community/general/plugins/modules/slack.py b/ansible_collections/community/general/plugins/modules/slack.py
index 4e26f1973..41dd4f5db 100644
--- a/ansible_collections/community/general/plugins/modules/slack.py
+++ b/ansible_collections/community/general/plugins/modules/slack.py
@@ -19,7 +19,7 @@ DOCUMENTATION = """
module: slack
short_description: Send Slack notifications
description:
- - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
+ - The M(community.general.slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
author: "Ramon de la Fuente (@ramondelafuente)"
extends_documentation_fragment:
- community.general.attributes
@@ -32,8 +32,8 @@ options:
domain:
type: str
description:
- - Slack (sub)domain for your environment without protocol. (i.e.
- C(example.slack.com)) In 1.8 and beyond, this is deprecated and may
+ - Slack (sub)domain for your environment without protocol. (For example
+ V(example.slack.com).) In Ansible 1.8 and beyond, this is deprecated and may
be ignored. See token documentation for information.
token:
type: str
@@ -41,9 +41,9 @@ options:
- Slack integration token. This authenticates you to the slack service.
Make sure to use the correct type of token, depending on what method you use.
- "Webhook token:
- Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
- 1.8 and above, ansible adapts to the new slack API where tokens look
- like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
+ Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In
+ Ansible 1.8 and above, Ansible adapts to the new slack API where tokens look
+ like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
are in the new format then slack will ignore any value of domain. If
the token is in the old format the domain is required. Ansible has no
control of when slack will get rid of the old API. When slack does
@@ -55,8 +55,8 @@ options:
that the incoming webhooks can be added. The key is on the end of the
URL given to you in that section."
- "WebAPI token:
- Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-)
- or C(xoxa-), eg. C(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id.
+ Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-), V(xoxb-)
+ or V(xoxa-), for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id.
See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
required: true
msg:
@@ -68,7 +68,7 @@ options:
channel:
type: str
description:
- - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ - Channel to send the message to. If absent, the message goes to the channel selected for the O(token).
thread_id:
description:
- Optional. Timestamp of parent message to thread this message. https://api.slack.com/docs/message-threading
@@ -76,7 +76,7 @@ options:
message_id:
description:
- Optional. Message ID to edit, instead of posting a new message.
- - If supplied I(channel_id) must be in form of C(C0xxxxxxx). use C({{ slack_response.channel_id }}) to get I(channel_id) from previous task run.
+ - If supplied, O(channel) must be in the form of C(C0xxxxxxx). Use C({{ slack_response.channel_id }}) to get RV(ignore:channel_id) from a previous task run.
- Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
type: str
version_added: 1.2.0
@@ -88,17 +88,17 @@ options:
icon_url:
type: str
description:
- - URL for the message sender's icon (default C(https://docs.ansible.com/favicon.ico))
+ - URL for the message sender's icon.
default: https://docs.ansible.com/favicon.ico
icon_emoji:
type: str
description:
- Emoji for the message sender. See Slack documentation for options.
- (if I(icon_emoji) is set, I(icon_url) will not be used)
+ - If O(icon_emoji) is set, O(icon_url) will not be used.
link_names:
type: int
description:
- - Automatically create links for channels and usernames in I(msg).
+ - Automatically create links for channels and usernames in O(msg).
default: 1
choices:
- 1
@@ -112,7 +112,7 @@ options:
- 'none'
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: true
@@ -121,7 +121,6 @@ options:
description:
- Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
- Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value.
- - Specifying value in hex is supported since Ansible 2.8.
default: 'normal'
attachments:
type: list
@@ -139,12 +138,12 @@ options:
prepend_hash:
type: str
description:
- - Setting for automatically prepending a C(#) symbol on the passed in I(channel_id).
- - The C(auto) method prepends a C(#) unless I(channel_id) starts with one of C(#), C(@), C(C0), C(GF), C(G0), C(CP).
- These prefixes only cover a small set of the prefixes that should not have a C(#) prepended.
- Since an exact condition which I(channel_id) values must not have the C(#) prefix is not known,
- the value C(auto) for this option will be deprecated in the future. It is best to explicitly set
- I(prepend_hash=always) or I(prepend_hash=never) to obtain the needed behavior.
+ - Setting for automatically prepending a V(#) symbol to the passed-in O(channel).
+ - The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP).
+ These prefixes only cover a small set of the prefixes that should not have a V(#) prepended.
+ Since the exact condition under which O(channel) values must not have the V(#) prefix is not known,
+ the value V(auto) for this option will be deprecated in the future. It is best to explicitly set
+ O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior.
choices:
- 'always'
- 'never'
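The prepend_hash description above defines three behaviours (V(always), V(never), V(auto)) and the prefixes that suppress the automatic V(#). Below is a hedged sketch of that resolution logic; the helper name is hypothetical and the prefix list is taken verbatim from the option text.

# Sketch of the documented prepend_hash behaviour; not the module's code.
NO_HASH_PREFIXES = ('#', '@', 'C0', 'GF', 'G0', 'CP')

def resolve_channel(channel, prepend_hash='auto'):
    """Return the channel value that would be sent to Slack."""
    if channel is None or prepend_hash == 'never':
        return channel
    if prepend_hash == 'always':
        return '#' + channel
    # 'auto': prepend '#' unless the channel already looks like an ID or mention.
    return channel if channel.startswith(NO_HASH_PREFIXES) else '#' + channel

print(resolve_channel('general'))      # '#general'
print(resolve_channel('C012AB3CD'))    # 'C012AB3CD'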
diff --git a/ansible_collections/community/general/plugins/modules/slackpkg.py b/ansible_collections/community/general/plugins/modules/slackpkg.py
index 208061a4c..e3d7a1542 100644
--- a/ansible_collections/community/general/plugins/modules/slackpkg.py
+++ b/ansible_collections/community/general/plugins/modules/slackpkg.py
@@ -40,7 +40,7 @@ options:
state:
description:
- - state of the package, you can use "installed" as an alias for C(present) and removed as one for C(absent).
+ - State of the package. You can use V(installed) as an alias for V(present) and V(removed) as one for V(absent).
choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ]
required: false
default: present
diff --git a/ansible_collections/community/general/plugins/modules/smartos_image_info.py b/ansible_collections/community/general/plugins/modules/smartos_image_info.py
index e93ffb9ac..1a25b4668 100644
--- a/ansible_collections/community/general/plugins/modules/smartos_image_info.py
+++ b/ansible_collections/community/general/plugins/modules/smartos_image_info.py
@@ -15,8 +15,6 @@ module: smartos_image_info
short_description: Get SmartOS image details
description:
- Retrieve information about all installed images on SmartOS.
- - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
author: Adam Å tevko (@xen0l)
extends_documentation_fragment:
- community.general.attributes
@@ -29,9 +27,9 @@ options:
filters:
description:
- Criteria for selecting image. Can be any value from image
- manifest and 'published_date', 'published', 'source', 'clones',
- and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
- under 'imgadm list'.
+ manifest and C(published_date), C(published), C(source), C(clones),
+ and C(size). More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under C(imgadm list).
type: str
'''
diff --git a/ansible_collections/community/general/plugins/modules/snap.py b/ansible_collections/community/general/plugins/modules/snap.py
index 4b798d6e2..fd1676480 100644
--- a/ansible_collections/community/general/plugins/modules/snap.py
+++ b/ansible_collections/community/general/plugins/modules/snap.py
@@ -17,7 +17,7 @@ DOCUMENTATION = '''
module: snap
short_description: Manages snaps
description:
- - "Manages snaps packages."
+ - Manages snap packages.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -28,14 +28,20 @@ attributes:
options:
name:
description:
- - Name of the snaps.
+ - Name of the snaps to be installed.
+ - Any named snap accepted by the C(snap) command is valid.
+ - >
+ Notice that snap files might require O(dangerous=true) to ignore the error
+ "cannot find signatures with metadata for snap".
required: true
type: list
elements: str
state:
description:
- Desired state of the package.
- required: false
+ - >
+ When O(state=present) the module will use C(snap install) if the snap is not installed,
+ and C(snap refresh) if it is installed but from a different channel.
default: present
choices: [ absent, present, enabled, disabled ]
type: str
@@ -43,7 +49,7 @@ options:
description:
- Confinement policy. The classic confinement allows a snap to have
the same level of access to the system as "classic" packages,
- like those managed by APT. This option corresponds to the --classic argument.
+ like those managed by APT. This option corresponds to the C(--classic) argument.
This option can only be specified if there is a single snap in the task.
type: bool
required: false
@@ -52,18 +58,29 @@ options:
description:
- Define which release of a snap is installed and tracked for updates.
This option can only be specified if there is a single snap in the task.
+ - If not passed, the C(snap) command will default to V(stable).
+ - If the value passed does not contain the C(track), it will default to C(latest).
+ For example, if V(edge) is passed, the module will assume the channel to be V(latest/edge).
+ - See U(https://snapcraft.io/docs/channels) for more details about snap channels.
type: str
required: false
- default: stable
options:
description:
- Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied
- to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in I(name). Options will
+ to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will
only be applied to active snaps.
required: false
type: list
elements: str
version_added: 4.4.0
+ dangerous:
+ description:
+ - Install the given snap file even if there are no pre-acknowledged signatures for it,
+ meaning it was not verified and could be dangerous.
+ type: bool
+ required: false
+ default: false
+ version_added: 7.2.0
author:
- Victor Carceler (@vcarceler) <vcarceler@iespuigcastellar.xeill.net>
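The reworked snap documentation above explains two behaviours: a bare channel such as V(edge) is treated as V(latest/edge), and O(state=present) maps to C(snap install) for snaps that are missing and C(snap refresh) for snaps tracking a different channel. The sketch below mirrors the NOT_INSTALLED/CHANNEL_MISMATCH/INSTALLED status constants introduced later in this diff; it is an illustration, not the module's code path.

# Illustration of the install/refresh decision described above.
NOT_INSTALLED, CHANNEL_MISMATCH, INSTALLED = range(3)

def snap_status(name, requested_channel, installed):
    """installed maps snap names to the channel they currently track."""
    if name not in installed:
        return NOT_INSTALLED
    # A bare risk level such as 'edge' also matches 'latest/edge'.
    accepted = (requested_channel, 'latest/%s' % requested_channel)
    if requested_channel and installed[name] not in accepted:
        return CHANNEL_MISMATCH
    return INSTALLED

ACTIONS = {NOT_INSTALLED: 'install', CHANNEL_MISMATCH: 'refresh', INSTALLED: None}

print(ACTIONS[snap_status('lxd', 'edge', {'lxd': 'latest/edge'})])   # None
print(ACTIONS[snap_status('lxd', 'beta', {'lxd': 'latest/edge'})])   # refresh
print(ACTIONS[snap_status('core', 'stable', {})])                    # install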
@@ -154,51 +171,29 @@ import numbers
from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.module_helper import (
- CmdStateModuleHelper, ArgFormat
-)
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.snap import snap_runner
-__state_map = dict(
- present='install',
- absent='remove',
- enabled='enable',
- disabled='disable',
- info='info', # not public
- list='list', # not public
- set='set', # not public
- get='get', # not public
-)
+class Snap(StateModuleHelper):
+ NOT_INSTALLED = 0
+ CHANNEL_MISMATCH = 1
+ INSTALLED = 2
-
-def _state_map(value):
- return [__state_map[value]]
-
-
-class Snap(CmdStateModuleHelper):
__disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)')
__set_param_re = re.compile(r'(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)')
+ __list_re = re.compile(r'^(?P<name>\S+)\s+\S+\s+\S+\s+(?P<channel>\S+)')
module = dict(
argument_spec={
'name': dict(type='list', elements='str', required=True),
- 'state': dict(type='str', default='present',
- choices=['absent', 'present', 'enabled', 'disabled']),
+ 'state': dict(type='str', default='present', choices=['absent', 'present', 'enabled', 'disabled']),
'classic': dict(type='bool', default=False),
- 'channel': dict(type='str', default='stable'),
+ 'channel': dict(type='str'),
'options': dict(type='list', elements='str'),
+ 'dangerous': dict(type='bool', default=False),
},
supports_check_mode=True,
)
- command = "snap"
- command_args_formats = dict(
- actionable_snaps=dict(fmt=lambda v: v),
- state=dict(fmt=_state_map),
- classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN),
- channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]),
- options=dict(fmt=list),
- json_format=dict(fmt="-d", style=ArgFormat.BOOLEAN),
- )
- check_rc = False
@staticmethod
def _first_non_zero(a):
@@ -208,19 +203,63 @@ class Snap(CmdStateModuleHelper):
return 0
- def _run_multiple_commands(self, commands):
- outputs = [(c,) + self.run_command(params=c) for c in commands]
- results = ([], [], [], [])
- for output in outputs:
- for i in range(4):
- results[i].append(output[i])
-
- return [
- '; '.join([to_native(x) for x in results[0]]),
- self._first_non_zero(results[1]),
- '\n'.join(results[2]),
- '\n'.join(results[3]),
- ]
+ def __init_module__(self):
+ self.runner = snap_runner(self.module)
+ # if state=present there might be file names passed in 'name', in
+ # which case they must be converted to their actual snap names, which
+ # is done using the names_from_snaps() method calling 'snap info'.
+ self.vars.set("snapinfo_run_info", [], output=(self.verbosity >= 4))
+ self.vars.set("status_run_info", [], output=(self.verbosity >= 4))
+ self.vars.set("status_out", None, output=(self.verbosity >= 4))
+ self.vars.set("run_info", [], output=(self.verbosity >= 4))
+
+ if self.vars.state == "present":
+ self.vars.set("snap_names", self.names_from_snaps(self.vars.name))
+ status_var = "snap_names"
+ else:
+ status_var = "name"
+ self.vars.set("status_var", status_var, output=False)
+ self.vars.set("snap_status", self.snap_status(self.vars[self.vars.status_var], self.vars.channel), output=False, change=True)
+ self.vars.set("snap_status_map", dict(zip(self.vars.name, self.vars.snap_status)), output=False, change=True)
+
+ def __quit_module__(self):
+ self.vars.snap_status = self.snap_status(self.vars[self.vars.status_var], self.vars.channel)
+ if self.vars.channel is None:
+ self.vars.channel = "stable"
+
+ def _run_multiple_commands(self, commands, actionable_names, bundle=True, refresh=False):
+ results_cmd = []
+ results_rc = []
+ results_out = []
+ results_err = []
+ results_run_info = []
+
+ state = "refresh" if refresh else self.vars.state
+
+ with self.runner(commands + ["name"]) as ctx:
+ if bundle:
+ rc, out, err = ctx.run(state=state, name=actionable_names)
+ results_cmd.append(commands + actionable_names)
+ results_rc.append(rc)
+ results_out.append(out.strip())
+ results_err.append(err.strip())
+ results_run_info.append(ctx.run_info)
+ else:
+ for name in actionable_names:
+ rc, out, err = ctx.run(state=state, name=name)
+ results_cmd.append(commands + [name])
+ results_rc.append(rc)
+ results_out.append(out.strip())
+ results_err.append(err.strip())
+ results_run_info.append(ctx.run_info)
+
+ return (
+ '; '.join([to_native(x) for x in results_cmd]),
+ self._first_non_zero(results_rc),
+ '\n'.join(results_out),
+ '\n'.join(results_err),
+ results_run_info,
+ )
def convert_json_subtree_to_map(self, json_subtree, prefix=None):
option_map = {}
@@ -234,7 +273,6 @@ class Snap(CmdStateModuleHelper):
if isinstance(value, (str, float, bool, numbers.Integral)):
option_map[full_key] = str(value)
-
else:
option_map.update(self.convert_json_subtree_to_map(json_subtree=value, prefix=full_key))
@@ -245,8 +283,8 @@ class Snap(CmdStateModuleHelper):
return self.convert_json_subtree_to_map(json_object)
def retrieve_option_map(self, snap_name):
- params = [{'state': 'get'}, {'name': snap_name}, {'json_format': True}]
- rc, out, err = self.run_command(params=params)
+ with self.runner("get name") as ctx:
+ rc, out, err = ctx.run(name=snap_name)
if rc != 0:
return {}
@@ -258,18 +296,73 @@ class Snap(CmdStateModuleHelper):
try:
option_map = self.convert_json_to_map(out)
-
+ return option_map
except Exception as e:
self.do_raise(
msg="Parsing option map returned by 'snap get {0}' triggers exception '{1}', output:\n'{2}'".format(snap_name, str(e), out))
- return option_map
+ def names_from_snaps(self, snaps):
+ def process_one(rc, out, err):
+ res = [line for line in out.split("\n") if line.startswith("name:")]
+ name = res[0].split()[1]
+ return [name]
+
+ def process_many(rc, out, err):
+ # This needs to be "\n---" instead of just "---" because otherwise
+ # if a snap uses "---" in its description then that will incorrectly
+ # be interpreted as a separator between snaps in the output.
+ outputs = out.split("\n---")
+ res = []
+ for sout in outputs:
+ res.extend(process_one(rc, sout, ""))
+ return res
+
+ def process(rc, out, err):
+ if len(snaps) == 1:
+ check_error = err
+ process_ = process_one
+ else:
+ check_error = out
+ process_ = process_many
+
+ if "warning: no snap found" in check_error:
+ self.do_raise("Snaps not found: {0}.".format([x.split()[-1]
+ for x in out.split('\n')
+ if x.startswith("warning: no snap found")]))
+ return process_(rc, out, err)
+
+ names = []
+ if snaps:
+ with self.runner("info name", output_process=process) as ctx:
+ try:
+ names = ctx.run(name=snaps)
+ finally:
+ self.vars.snapinfo_run_info.append(ctx.run_info)
+ return names
+
+ def snap_status(self, snap_name, channel):
+ def _status_check(name, channel, installed):
+ match = [c for n, c in installed if n == name]
+ if not match:
+ return Snap.NOT_INSTALLED
+ if channel and match[0] not in (channel, "latest/{0}".format(channel)):
+ return Snap.CHANNEL_MISMATCH
+ else:
+ return Snap.INSTALLED
+
+ with self.runner("_list") as ctx:
+ rc, out, err = ctx.run(check_rc=True)
+ list_out = out.split('\n')[1:]
+ list_out = [self.__list_re.match(x) for x in list_out]
+ list_out = [(m.group('name'), m.group('channel')) for m in list_out if m]
+ self.vars.status_out = list_out
+ self.vars.status_run_info = ctx.run_info
- def is_snap_installed(self, snap_name):
- return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0]
+ return [_status_check(n, channel, list_out) for n in snap_name]
def is_snap_enabled(self, snap_name):
- rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': snap_name}])
+ with self.runner("_list name") as ctx:
+ rc, out, err = ctx.run(name=snap_name)
if rc != 0:
return None
result = out.splitlines()[1]
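The comment in names_from_snaps() above explains why multi-snap 'snap info' output is split on a newline followed by '---' rather than on '---' alone; a self-contained illustration with invented output (the real 'snap info' format carries more fields):

# invented output for two snaps; the first description itself contains "---"
out = (
    "name: foo\n"
    "summary: a snap whose description contains --- somewhere\n"
    "---\n"
    "name: bar\n"
    "summary: second snap\n"
)

naive = out.split("---")    # 3 chunks: the first snap is cut in half
safe = out.split("\n---")   # 2 chunks: exactly one per snap

def name_of(chunk):
    return [line for line in chunk.split("\n") if line.startswith("name:")][0].split()[1]

print(len(naive), len(safe))       # 3 2
print([name_of(c) for c in safe])  # ['foo', 'bar']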
@@ -279,22 +372,22 @@ class Snap(CmdStateModuleHelper):
notes = match.group('notes')
return "disabled" not in notes.split(',')
- def process_actionable_snaps(self, actionable_snaps):
+ def _present(self, actionable_snaps, refresh=False):
self.changed = True
self.vars.snaps_installed = actionable_snaps
- if self.module.check_mode:
+ if self.check_mode:
return
- params = ['state', 'classic', 'channel'] # get base cmd parts
+ params = ['state', 'classic', 'channel', 'dangerous'] # get base cmd parts
has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable'
has_multiple_snaps = len(actionable_snaps) > 1
if has_one_pkg_params and has_multiple_snaps:
- commands = [params + [{'actionable_snaps': [s]}] for s in actionable_snaps]
+ self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, bundle=False, refresh=refresh)
else:
- commands = [params + [{'actionable_snaps': actionable_snaps}]]
- self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
+ self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps, refresh=refresh)
+ self.vars.run_info = run_info
if rc == 0:
return
@@ -314,10 +407,13 @@ class Snap(CmdStateModuleHelper):
self.vars.meta('classic').set(output=True)
self.vars.meta('channel').set(output=True)
- actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)]
- if actionable_snaps:
- self.process_actionable_snaps(actionable_snaps)
+ actionable_refresh = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.CHANNEL_MISMATCH]
+ if actionable_refresh:
+ self._present(actionable_refresh, refresh=True)
+ actionable_install = [snap for snap in self.vars.name if self.vars.snap_status_map[snap] == Snap.NOT_INSTALLED]
+ if actionable_install:
+ self._present(actionable_install)
self.set_options()
@@ -325,7 +421,7 @@ class Snap(CmdStateModuleHelper):
if self.vars.options is None:
return
- actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)]
+ actionable_snaps = [s for s in self.vars.name if self.vars.snap_status_map[s] != Snap.NOT_INSTALLED]
overall_options_changed = []
for snap_name in actionable_snaps:
@@ -360,11 +456,9 @@ class Snap(CmdStateModuleHelper):
if options_changed:
self.changed = True
- if not self.module.check_mode:
- params = [{'state': 'set'}, {'name': snap_name}, {'options': options_changed}]
-
- rc, out, err = self.run_command(params=params)
-
+ if not self.check_mode:
+ with self.runner("_set name options") as ctx:
+ rc, out, err = ctx.run(name=snap_name, options=options_changed)
if rc != 0:
if 'has no "configure" hook' in err:
msg = "Snap '{snap}' does not have any configurable options".format(snap=snap_name)
@@ -377,18 +471,16 @@ class Snap(CmdStateModuleHelper):
if overall_options_changed:
self.vars.options_changed = overall_options_changed
- def _generic_state_action(self, actionable_func, actionable_var, params=None):
+ def _generic_state_action(self, actionable_func, actionable_var, params):
actionable_snaps = [s for s in self.vars.name if actionable_func(s)]
if not actionable_snaps:
return
self.changed = True
self.vars[actionable_var] = actionable_snaps
- if self.module.check_mode:
+ if self.check_mode:
return
- if params is None:
- params = ['state']
- commands = [params + [{'actionable_snaps': actionable_snaps}]]
- self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
+ self.vars.cmd, rc, out, err, run_info = self._run_multiple_commands(params, actionable_snaps)
+ self.vars.run_info = run_info
if rc == 0:
return
msg = "Ooops! Snap operation failed while executing '{cmd}', please examine logs and " \
@@ -396,7 +488,7 @@ class Snap(CmdStateModuleHelper):
self.do_raise(msg=msg)
def state_absent(self):
- self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state'])
+ self._generic_state_action(lambda s: self.vars.snap_status_map[s] != Snap.NOT_INSTALLED, "snaps_removed", ['classic', 'channel', 'state'])
def state_enabled(self):
self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state'])
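To summarize the channel logic that state_present() now relies on via snap_status() and snap_status_map: a snap counts as correctly installed when its tracking channel equals the requested channel or its latest/<channel> spelling, a different channel queues it for a refresh, and absence from 'snap list' queues it for an install. A standalone sketch with hypothetical 'snap list' data and illustrative status constants (the numeric values are not taken from the module):

NOT_INSTALLED, CHANNEL_MISMATCH, INSTALLED = range(3)  # illustrative values only

def status_check(name, channel, installed):
    match = [c for n, c in installed if n == name]
    if not match:
        return NOT_INSTALLED
    if channel and match[0] not in (channel, "latest/{0}".format(channel)):
        return CHANNEL_MISMATCH
    return INSTALLED

# hypothetical (name, tracking channel) pairs parsed from `snap list`
installed = [("core", "latest/stable"), ("microk8s", "1.28/stable")]

print(status_check("core", "stable", installed))      # 2 -> INSTALLED
print(status_check("microk8s", "stable", installed))  # 1 -> CHANNEL_MISMATCH -> refresh
print(status_check("hello", "stable", installed))     # 0 -> NOT_INSTALLED   -> install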
diff --git a/ansible_collections/community/general/plugins/modules/snap_alias.py b/ansible_collections/community/general/plugins/modules/snap_alias.py
index 19fbef003..54448c6f3 100644
--- a/ansible_collections/community/general/plugins/modules/snap_alias.py
+++ b/ansible_collections/community/general/plugins/modules/snap_alias.py
@@ -86,15 +86,8 @@ snap_aliases:
import re
-from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
-
-
-_state_map = dict(
- present='alias',
- absent='unalias',
- info='aliases',
-)
+from ansible_collections.community.general.plugins.module_utils.snap import snap_runner
class SnapAlias(StateModuleHelper):
@@ -113,18 +106,12 @@ class SnapAlias(StateModuleHelper):
supports_check_mode=True,
)
- command_args_formats = {
- "state": cmd_runner_fmt.as_map(_state_map),
- "name": cmd_runner_fmt.as_list(),
- "alias": cmd_runner_fmt.as_list(),
- }
-
def _aliases(self):
n = self.vars.name
return {n: self._get_aliases_for(n)} if n else self._get_aliases()
def __init_module__(self):
- self.runner = CmdRunner(self.module, "snap", self.command_args_formats, check_rc=False)
+ self.runner = snap_runner(self.module)
self.vars.set("snap_aliases", self._aliases(), change=True, diff=True)
def __quit_module__(self):
@@ -141,8 +128,8 @@ class SnapAlias(StateModuleHelper):
results[snap] = results.get(snap, []) + [alias]
return results
- with self.runner("state name", check_rc=True, output_process=process) as ctx:
- aliases = ctx.run(state="info")
+ with self.runner("state_alias name", check_rc=True, output_process=process) as ctx:
+ aliases = ctx.run(state_alias="info")
if self.verbosity >= 4:
self.vars.get_aliases_run_info = ctx.run_info
return aliases
@@ -164,8 +151,8 @@ class SnapAlias(StateModuleHelper):
for _alias in self.vars.alias:
if not self._has_alias(self.vars.name, _alias):
self.changed = True
- with self.runner("state name alias", check_mode_skip=True) as ctx:
- ctx.run(alias=_alias)
+ with self.runner("state_alias name alias", check_mode_skip=True) as ctx:
+ ctx.run(state_alias=self.vars.state, alias=_alias)
if self.verbosity >= 4:
self.vars.run_info = ctx.run_info
@@ -173,16 +160,16 @@ class SnapAlias(StateModuleHelper):
if not self.vars.alias:
if self._has_alias(self.vars.name):
self.changed = True
- with self.runner("state name", check_mode_skip=True) as ctx:
- ctx.run()
+ with self.runner("state_alias name", check_mode_skip=True) as ctx:
+ ctx.run(state_alias=self.vars.state)
if self.verbosity >= 4:
self.vars.run_info = ctx.run_info
else:
for _alias in self.vars.alias:
if self._has_alias(self.vars.name, _alias):
self.changed = True
- with self.runner("state alias", check_mode_skip=True) as ctx:
- ctx.run(alias=_alias)
+ with self.runner("state_alias alias", check_mode_skip=True) as ctx:
+ ctx.run(state_alias=self.vars.state, alias=_alias)
if self.verbosity >= 4:
self.vars.run_info = ctx.run_info
diff --git a/ansible_collections/community/general/plugins/modules/snmp_facts.py b/ansible_collections/community/general/plugins/modules/snmp_facts.py
index e54473ffa..aecc08f32 100644
--- a/ansible_collections/community/general/plugins/modules/snmp_facts.py
+++ b/ansible_collections/community/general/plugins/modules/snmp_facts.py
@@ -36,46 +36,46 @@ options:
required: true
version:
description:
- - SNMP Version to use, C(v2), C(v2c) or C(v3).
+      - SNMP version to use: V(v2), V(v2c), or V(v3).
type: str
required: true
choices: [ v2, v2c, v3 ]
community:
description:
- - The SNMP community string, required if I(version) is C(v2) or C(v2c).
+ - The SNMP community string, required if O(version) is V(v2) or V(v2c).
type: str
level:
description:
- Authentication level.
- - Required if I(version) is C(v3).
+ - Required if O(version=v3).
type: str
choices: [ authNoPriv, authPriv ]
username:
description:
- Username for SNMPv3.
- - Required if I(version) is C(v3).
+ - Required if O(version=v3).
type: str
integrity:
description:
- Hashing algorithm.
- - Required if I(version) is C(v3).
+ - Required if O(version=v3).
type: str
choices: [ md5, sha ]
authkey:
description:
- Authentication key.
- - Required I(version) is C(v3).
+      - Required if O(version=v3).
type: str
privacy:
description:
- Encryption algorithm.
- - Required if I(level) is C(authPriv).
+ - Required if O(level=authPriv).
type: str
choices: [ aes, des ]
privkey:
description:
- Encryption key.
- - Required if I(level) is C(authPriv).
+ - Required if O(level=authPriv).
type: str
timeout:
description:
@@ -137,7 +137,7 @@ ansible_sysname:
type: str
sample: ubuntu-user
ansible_syslocation:
- description: The physical location of this node (e.g., C(telephone closet, 3rd floor)).
+ description: The physical location of this node (for example, V(telephone closet, 3rd floor)).
returned: success
type: str
sample: Sitting on the Dock of the Bay
diff --git a/ansible_collections/community/general/plugins/modules/solaris_zone.py b/ansible_collections/community/general/plugins/modules/solaris_zone.py
index 0f970704e..d9f44589d 100644
--- a/ansible_collections/community/general/plugins/modules/solaris_zone.py
+++ b/ansible_collections/community/general/plugins/modules/solaris_zone.py
@@ -29,16 +29,16 @@ attributes:
options:
state:
description:
- - C(present), configure and install the zone.
- - C(installed), synonym for C(present).
- - C(running), if the zone already exists, boot it, otherwise, configure and install
+ - V(present), configure and install the zone.
+ - V(installed), synonym for V(present).
+ - V(running), if the zone already exists, boot it, otherwise, configure and install
the zone first, then boot it.
- - C(started), synonym for C(running).
- - C(stopped), shutdown a zone.
- - C(absent), destroy the zone.
- - C(configured), configure the ready so that it's to be attached.
- - C(attached), attach a zone, but do not boot it.
- - C(detached), shutdown and detach a zone
+ - V(started), synonym for V(running).
+      - V(stopped), shut down a zone.
+      - V(absent), destroy the zone.
+      - V(configured), configure the zone so that it is ready to be attached.
+      - V(attached), attach a zone, but do not boot it.
+      - V(detached), shut down and detach a zone.
type: str
choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
default: present
@@ -46,8 +46,8 @@ options:
description:
- Zone name.
- A zone name must be unique name.
- - A zone name must begin with an alpha-numeric character.
- - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
+ - A zone name must begin with an alphanumeric character.
+ - The name can contain alphanumeric characters, underscores V(_), hyphens V(-), and periods V(.).
- The name cannot be longer than 64 characters.
type: str
required: true
@@ -58,7 +58,7 @@ options:
type: str
sparse:
description:
- - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+ - Whether to create a sparse (V(true)) or whole root (V(false)) zone.
type: bool
default: false
root_password:
diff --git a/ansible_collections/community/general/plugins/modules/sorcery.py b/ansible_collections/community/general/plugins/modules/sorcery.py
index 3278ce0ab..4fcf46a05 100644
--- a/ansible_collections/community/general/plugins/modules/sorcery.py
+++ b/ansible_collections/community/general/plugins/modules/sorcery.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright (c) 2015-2016, Vlad Glagolev <scm@vaygr.net>
+# Copyright (c) 2015-2023, Vlad Glagolev <scm@vaygr.net>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -10,7 +10,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: sorcery
short_description: Package manager for Source Mage GNU/Linux
@@ -20,8 +20,7 @@ author: "Vlad Glagolev (@vaygr)"
notes:
- When all three components are selected, the update goes by the sequence --
Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
- - grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
- yet supported.
+ - Grimoire handling is supported since community.general 7.3.0.
requirements:
- bash
extends_documentation_fragment:
@@ -34,21 +33,31 @@ attributes:
options:
name:
description:
- - Name of the spell
- - multiple names can be given, separated by commas
- - special value '*' in conjunction with states C(latest) or
- C(rebuild) will update or rebuild the whole system respectively
- aliases: ["spell"]
+ - Name of the spell or grimoire.
+ - Multiple names can be given, separated by commas.
+ - Special value V(*) in conjunction with states V(latest) or
+        V(rebuild) will update or rebuild the whole system, respectively.
+ - The alias O(grimoire) was added in community.general 7.3.0.
+ aliases: ["spell", "grimoire"]
type: list
elements: str
+ repository:
+ description:
+ - Repository location.
+ - If specified, O(name) represents grimoire(s) instead of spell(s).
+      - Special value V(*) will pull the grimoire(s) from the official location.
+      - Only a single item in O(name) can be used in conjunction with V(*).
+      - O(state=absent) must be used with the special value V(*).
+ type: str
+ version_added: 7.3.0
+
state:
description:
- - Whether to cast, dispel or rebuild a package
- - state C(cast) is an equivalent of C(present), not C(latest)
- - state C(latest) always triggers I(update_cache=true)
- - state C(rebuild) implies cast of all specified spells, not only
- those existed before
+ - Whether to cast, dispel or rebuild a package.
+ - State V(cast) is an equivalent of V(present), not V(latest).
+ - State V(rebuild) implies cast of all specified spells, not only
+        those that existed before.
choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
default: "present"
type: str
@@ -56,32 +65,32 @@ options:
depends:
description:
- Comma-separated list of _optional_ dependencies to build a spell
- (or make sure it is built) with; use +/- in front of dependency
- to turn it on/off ('+' is optional though)
- - this option is ignored if C(name) parameter is equal to '*' or
- contains more than one spell
- - providers must be supplied in the form recognized by Sorcery, e.g.
- 'openssl(SSL)'
+        (or make sure it is built) with; use V(+)/V(-) in front of a dependency
+        to turn it on/off (V(+) is optional though).
+      - This option is ignored if the O(name) parameter is equal to V(*) or
+ contains more than one spell.
+ - Providers must be supplied in the form recognized by Sorcery,
+ for example 'V(openssl(SSL\))'.
type: str
update:
description:
- - Whether or not to update sorcery scripts at the very first stage
+ - Whether or not to update sorcery scripts at the very first stage.
type: bool
default: false
update_cache:
description:
- - Whether or not to update grimoire collection before casting spells
+ - Whether or not to update grimoire collection before casting spells.
type: bool
default: false
aliases: ["update_codex"]
cache_valid_time:
description:
- - Time in seconds to invalidate grimoire collection on update
- - especially useful for SCM and rsync grimoires
- - makes sense only in pair with C(update_cache)
+ - Time in seconds to invalidate grimoire collection on update.
+ - Especially useful for SCM and rsync grimoires.
+      - Only makes sense together with O(update_cache).
type: int
default: 0
'''
@@ -148,6 +157,30 @@ EXAMPLES = '''
update_codex: true
cache_valid_time: 86400
+- name: Make sure stable grimoire is present
+ community.general.sorcery:
+ name: stable
+ repository: '*'
+ state: present
+
+- name: Make sure binary and stable-rc grimoires are removed
+ community.general.sorcery:
+ grimoire: binary,stable-rc
+ repository: '*'
+ state: absent
+
+- name: Make sure games grimoire is pulled from rsync
+ community.general.sorcery:
+ grimoire: games
+ repository: "rsync://download.sourcemage.org::codex/games"
+ state: present
+
+- name: Make sure a specific branch of stable grimoire is pulled from git
+ community.general.sorcery:
+ grimoire: stable.git
+ repository: "git://download.sourcemage.org/smgl/grimoire.git:stable.git:stable-0.62"
+ state: present
+
- name: Update only Sorcery itself
community.general.sorcery:
update: true
@@ -165,6 +198,8 @@ import re
import shutil
import sys
+from ansible.module_utils.basic import AnsibleModule
+
# auto-filled at module init
SORCERY = {
@@ -178,6 +213,8 @@ SORCERY = {
SORCERY_LOG_DIR = "/var/log/sorcery"
SORCERY_STATE_DIR = "/var/state/sorcery"
+NA = "N/A"
+
def get_sorcery_ver(module):
""" Get Sorcery version. """
@@ -218,9 +255,11 @@ def codex_fresh(codex, module):
return True
-def codex_list(module):
+def codex_list(module, skip_new=False):
""" List valid grimoire collection. """
+ params = module.params
+
codex = {}
cmd_scribe = "%s index" % SORCERY['scribe']
@@ -239,6 +278,10 @@ def codex_list(module):
if match:
codex[match.group('grim')] = match.group('ver')
+ # return only specified grimoires unless requested to skip new
+ if params['repository'] and not skip_new:
+ codex = dict((x, codex.get(x, NA)) for x in params['name'])
+
if not codex:
module.fail_json(msg="no grimoires to operate on; add at least one")
@@ -256,8 +299,7 @@ def update_sorcery(module):
changed = False
if module.check_mode:
- if not module.params['name'] and not module.params['update_cache']:
- module.exit_json(changed=True, msg="would have updated Sorcery")
+ return (True, "would have updated Sorcery")
else:
sorcery_ver = get_sorcery_ver(module)
@@ -271,9 +313,7 @@ def update_sorcery(module):
if sorcery_ver != get_sorcery_ver(module):
changed = True
- if not module.params['name'] and not module.params['update_cache']:
- module.exit_json(changed=changed,
- msg="successfully updated Sorcery")
+ return (changed, "successfully updated Sorcery")
def update_codex(module):
@@ -292,28 +332,29 @@ def update_codex(module):
fresh = codex_fresh(codex, module)
if module.check_mode:
- if not params['name']:
- if not fresh:
- changed = True
+ if not fresh:
+ changed = True
- module.exit_json(changed=changed, msg="would have updated Codex")
- elif not fresh or params['name'] and params['state'] == 'latest':
- # SILENT is required as a workaround for query() in libgpg
- module.run_command_environ_update.update(dict(SILENT='1'))
+ return (changed, "would have updated Codex")
+ else:
+ if not fresh:
+ # SILENT is required as a workaround for query() in libgpg
+ module.run_command_environ_update.update(dict(SILENT='1'))
- cmd_scribe = "%s update" % SORCERY['scribe']
+ cmd_scribe = "%s update" % SORCERY['scribe']
- rc, stdout, stderr = module.run_command(cmd_scribe)
+ if params['repository']:
+ cmd_scribe += ' %s' % ' '.join(codex.keys())
- if rc != 0:
- module.fail_json(msg="unable to update Codex: " + stdout)
+ rc, stdout, stderr = module.run_command(cmd_scribe)
- if codex != codex_list(module):
- changed = True
+ if rc != 0:
+ module.fail_json(msg="unable to update Codex: " + stdout)
- if not params['name']:
- module.exit_json(changed=changed,
- msg="successfully updated Codex")
+ if codex != codex_list(module):
+ changed = True
+
+ return (changed, "successfully updated Codex")
def match_depends(module):
@@ -446,6 +487,65 @@ def match_depends(module):
return depends_ok
+def manage_grimoires(module):
+ """ Add or remove grimoires. """
+
+ params = module.params
+ grimoires = params['name']
+ url = params['repository']
+
+ codex = codex_list(module, True)
+
+ if url == '*':
+ if params['state'] in ('present', 'latest', 'absent'):
+ if params['state'] == 'absent':
+ action = "remove"
+ todo = set(grimoires) & set(codex)
+ else:
+ action = "add"
+ todo = set(grimoires) - set(codex)
+
+ if not todo:
+ return (False, "all grimoire(s) are already %sed" % action[:5])
+
+ if module.check_mode:
+ return (True, "would have %sed grimoire(s)" % action[:5])
+
+ cmd_scribe = "%s %s %s" % (SORCERY['scribe'], action, ' '.join(todo))
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="failed to %s one or more grimoire(s): %s" % (action, stdout))
+
+ return (True, "successfully %sed one or more grimoire(s)" % action[:5])
+ else:
+ module.fail_json(msg="unsupported operation on '*' repository value")
+ else:
+ if params['state'] in ('present', 'latest'):
+ if len(grimoires) > 1:
+ module.fail_json(msg="using multiple items with repository is invalid")
+
+ grimoire = grimoires[0]
+
+ if grimoire in codex:
+ return (False, "grimoire %s already exists" % grimoire)
+
+ if module.check_mode:
+ return (True, "would have added grimoire %s from %s" % (grimoire, url))
+
+ cmd_scribe = "%s add %s from %s" % (SORCERY['scribe'], grimoire, url)
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="failed to add grimoire %s from %s: %s" % (grimoire, url, stdout))
+
+ return (True, "successfully added grimoire %s from %s" % (grimoire, url))
+ else:
+ module.fail_json(msg="unsupported operation on repository value")
+
+
def manage_spells(module):
""" Cast or dispel spells.
@@ -471,7 +571,7 @@ def manage_spells(module):
# see update_codex()
module.run_command_environ_update.update(dict(SILENT='1'))
- cmd_sorcery = "%s queue"
+ cmd_sorcery = "%s queue" % SORCERY['sorcery']
rc, stdout, stderr = module.run_command(cmd_sorcery)
@@ -490,7 +590,7 @@ def manage_spells(module):
except IOError:
module.fail_json(msg="failed to restore the update queue")
- module.exit_json(changed=True, msg="would have updated the system")
+ return (True, "would have updated the system")
cmd_cast = "%s --queue" % SORCERY['cast']
@@ -499,12 +599,12 @@ def manage_spells(module):
if rc != 0:
module.fail_json(msg="failed to update the system")
- module.exit_json(changed=True, msg="successfully updated the system")
+ return (True, "successfully updated the system")
else:
- module.exit_json(changed=False, msg="the system is already up to date")
+ return (False, "the system is already up to date")
elif params['state'] == 'rebuild':
if module.check_mode:
- module.exit_json(changed=True, msg="would have rebuilt the system")
+ return (True, "would have rebuilt the system")
cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
@@ -513,7 +613,7 @@ def manage_spells(module):
if rc != 0:
module.fail_json(msg="failed to rebuild the system: " + stdout)
- module.exit_json(changed=True, msg="successfully rebuilt the system")
+ return (True, "successfully rebuilt the system")
else:
module.fail_json(msg="unsupported operation on '*' name value")
else:
@@ -575,39 +675,40 @@ def manage_spells(module):
if cast_queue:
if module.check_mode:
- module.exit_json(changed=True, msg="would have cast spell(s)")
+ return (True, "would have cast spell(s)")
cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
rc, stdout, stderr = module.run_command(cmd_cast)
if rc != 0:
- module.fail_json(msg="failed to cast spell(s): %s" + stdout)
+ module.fail_json(msg="failed to cast spell(s): " + stdout)
- module.exit_json(changed=True, msg="successfully cast spell(s)")
+ return (True, "successfully cast spell(s)")
elif params['state'] != 'absent':
- module.exit_json(changed=False, msg="spell(s) are already cast")
+ return (False, "spell(s) are already cast")
if dispel_queue:
if module.check_mode:
- module.exit_json(changed=True, msg="would have dispelled spell(s)")
+ return (True, "would have dispelled spell(s)")
cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
rc, stdout, stderr = module.run_command(cmd_dispel)
if rc != 0:
- module.fail_json(msg="failed to dispel spell(s): %s" + stdout)
+ module.fail_json(msg="failed to dispel spell(s): " + stdout)
- module.exit_json(changed=True, msg="successfully dispelled spell(s)")
+ return (True, "successfully dispelled spell(s)")
else:
- module.exit_json(changed=False, msg="spell(s) are already dispelled")
+ return (False, "spell(s) are already dispelled")
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(default=None, aliases=['spell'], type='list', elements='str'),
+ name=dict(default=None, aliases=['spell', 'grimoire'], type='list', elements='str'),
+ repository=dict(default=None, type='str'),
state=dict(default='present', choices=['present', 'latest',
'absent', 'cast', 'dispelled', 'rebuild']),
depends=dict(default=None),
@@ -636,18 +737,34 @@ def main():
elif params['state'] in ('absent', 'dispelled'):
params['state'] = 'absent'
+ changed = {
+ 'sorcery': (False, NA),
+ 'grimoires': (False, NA),
+ 'codex': (False, NA),
+ 'spells': (False, NA)
+ }
+
if params['update']:
- update_sorcery(module)
+ changed['sorcery'] = update_sorcery(module)
- if params['update_cache'] or params['state'] == 'latest':
- update_codex(module)
+ if params['name'] and params['repository']:
+ changed['grimoires'] = manage_grimoires(module)
- if params['name']:
- manage_spells(module)
+ if params['update_cache']:
+ changed['codex'] = update_codex(module)
+ if params['name'] and not params['repository']:
+ changed['spells'] = manage_spells(module)
+
+ if any(x[0] for x in changed.values()):
+ state_msg = "state changed"
+ state_changed = True
+ else:
+ state_msg = "no change in state"
+ state_changed = False
+
+ module.exit_json(changed=state_changed, msg=state_msg + ": " + '; '.join(x[1] for x in changed.values()))
-# import module snippets
-from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
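Since the sorcery helpers now return (changed, message) pairs instead of calling exit_json() themselves, main() can merge the per-stage outcomes into one result; a minimal sketch of that aggregation with made-up stage results:

NA = "N/A"

# made-up per-stage outcomes in the same (changed, message) shape
changed = {
    'sorcery': (False, NA),
    'grimoires': (True, "successfully added one or more grimoire(s)"),
    'codex': (True, "successfully updated Codex"),
    'spells': (False, NA),
}

state_changed = any(flag for flag, _ in changed.values())
state_msg = "state changed" if state_changed else "no change in state"
print(state_msg + ": " + '; '.join(msg for _, msg in changed.values()))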
diff --git a/ansible_collections/community/general/plugins/modules/spectrum_device.py b/ansible_collections/community/general/plugins/modules/spectrum_device.py
index 5cfc07664..7cf7cf915 100644
--- a/ansible_collections/community/general/plugins/modules/spectrum_device.py
+++ b/ansible_collections/community/general/plugins/modules/spectrum_device.py
@@ -9,7 +9,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: spectrum_device
short_description: Creates/deletes devices in CA Spectrum
@@ -36,7 +36,7 @@ options:
type: str
description:
- SNMP community used for device discovery.
- - Required when I(state=present).
+ - Required when O(state=present).
required: true
landscape:
type: str
@@ -46,8 +46,8 @@ options:
state:
type: str
description:
- - On C(present) creates the device when it does not exist.
- - On C(absent) removes the device when it exists.
+ - On V(present) creates the device when it does not exist.
+ - On V(absent) removes the device when it exists.
choices: ['present', 'absent']
default: 'present'
url:
@@ -55,7 +55,7 @@ options:
aliases: [ oneclick_url ]
required: true
description:
- - HTTP, HTTPS URL of the Oneclick server in the form C((http|https)://host.domain[:port]).
+ - HTTP, HTTPS URL of the Oneclick server in the form V((http|https\)://host.domain[:port]).
url_username:
type: str
aliases: [ oneclick_user ]
@@ -70,12 +70,12 @@ options:
- Oneclick user password.
use_proxy:
description:
- - if C(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+      - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
default: true
type: bool
validate_certs:
description:
- - If C(false), SSL certificates will not be validated. This should only be used
+ - If V(false), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: true
type: bool
diff --git a/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py b/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py
index 028ad7f9f..43983a11a 100644
--- a/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py
+++ b/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py
@@ -21,8 +21,6 @@ author:
notes:
- Tested on CA Spectrum version 10.4.2.0.189.
- Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead.
-requirements:
- - 'python >= 2.7'
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -50,7 +48,7 @@ options:
aliases: [password]
use_proxy:
description:
- - if C(false), it will not use a proxy, even if one is defined in
+      - If V(false), it will not use a proxy, even if one is defined in
an environment variable on the target hosts.
default: true
required: false
@@ -67,7 +65,7 @@ options:
required: true
validate_certs:
description:
- - Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no
+ - Validate SSL certificates. Only change this to V(false) if you can guarantee that you are talking to the correct endpoint and there is no
man-in-the-middle attack happening.
type: bool
default: true
@@ -109,7 +107,7 @@ options:
required: true
value:
description:
- - Attribute value. Empty strings should be C("") or C(null).
+ - Attribute value. Empty strings should be V("") or V(null).
type: str
required: true
'''
diff --git a/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py b/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
index 02f2d3c5c..45556f621 100644
--- a/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
+++ b/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
@@ -19,7 +19,6 @@ description:
token = <YOUR TOKEN>
Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-
requirements:
- - python >= 2.7
- spotinst_sdk >= 1.0.38
extends_documentation_fragment:
- community.general.attributes
@@ -46,13 +45,13 @@ options:
description:
- A Personal API Access Token issued by Spotinst.
- >-
- When not specified, the module will try to obtain it, in that order, from: environment variable C(SPOTINST_TOKEN), or from the credentials path.
+      When not specified, the module will try to obtain it, in that order, from the environment variable E(SPOTINST_TOKEN) and then from the credentials path.
type: str
availability_vs_cost:
description:
- The strategy orientation.
- - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
+ - "The choices available are: V(availabilityOriented), V(costOriented), V(balanced)."
required: true
type: str
@@ -127,7 +126,7 @@ options:
elastic_ips:
description:
- - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
+      - List of ElasticIps Allocation Ids (for example, V(eipalloc-9d4e16f8)) to associate with the group instances
type: list
elements: str
@@ -139,7 +138,7 @@ options:
health_check_grace_period:
description:
- The amount of time, in seconds, after the instance has launched to start and check its health.
- - If not specified, it defaults to C(300).
+ - If not specified, it defaults to V(300).
type: int
health_check_unhealthy_duration_before_replacement:
@@ -150,7 +149,7 @@ options:
health_check_type:
description:
- The service to use for the health check.
- - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
+ - "The choices available are: V(ELB), V(HCS), V(TARGET_GROUP), V(MLB), V(EC2)."
type: str
iam_role_name:
@@ -266,14 +265,14 @@ options:
opsworks:
description:
- - The elastigroup OpsWorks integration configration.;
+ - The elastigroup OpsWorks integration configuration.;
Expects the following key -
layer_id (String)
type: dict
persistence:
description:
- - The Stateful elastigroup configration.;
+ - The Stateful elastigroup configuration.;
Accepts the following keys -
should_persist_root_device (Boolean),
should_persist_block_devices (Boolean),
@@ -283,7 +282,7 @@ options:
product:
description:
- Operation system type.
- - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))."
+ - "Available choices are: V(Linux/UNIX), V(SUSE Linux), V(Windows), V(Linux/UNIX (Amazon VPC)), V(SUSE Linux (Amazon VPC))."
required: true
type: str
@@ -404,7 +403,7 @@ options:
tenancy:
description:
- Dedicated vs shared tenancy.
- - "The available choices are: C(default), C(dedicated)."
+ - "The available choices are: V(default), V(dedicated)."
type: str
terminate_at_end_of_billing_hour:
@@ -415,7 +414,7 @@ options:
unit:
description:
- The capacity unit to launch instances by.
- - "The available choices are: C(instance), C(weight)."
+ - "The available choices are: V(instance), V(weight)."
type: str
up_scaling_policies:
diff --git a/ansible_collections/community/general/plugins/modules/ssh_config.py b/ansible_collections/community/general/plugins/modules/ssh_config.py
index 672ac8c47..e89e087b3 100644
--- a/ansible_collections/community/general/plugins/modules/ssh_config.py
+++ b/ansible_collections/community/general/plugins/modules/ssh_config.py
@@ -38,20 +38,20 @@ options:
user:
description:
- Which user account this configuration file belongs to.
- - If none given and I(ssh_config_file) is not specified, C(/etc/ssh/ssh_config) is used.
+ - If none given and O(ssh_config_file) is not specified, C(/etc/ssh/ssh_config) is used.
- If a user is given, C(~/.ssh/config) is used.
- - Mutually exclusive with I(ssh_config_file).
+ - Mutually exclusive with O(ssh_config_file).
type: str
group:
description:
- Which group this configuration file belongs to.
- - If none given, I(user) is used.
+ - If none given, O(user) is used.
type: str
host:
description:
- The endpoint this configuration is valid for.
- Can be an actual address on the internet or an alias that will
- connect to the value of I(hostname).
+ connect to the value of O(hostname).
required: true
type: str
hostname:
@@ -70,8 +70,17 @@ options:
description:
- The path to an identity file (SSH private key) that will be used
when connecting to this host.
- - File need to exist and have mode C(0600) to be valid.
+      - The file needs to exist and have mode V(0600) to be valid.
type: path
+ identities_only:
+ description:
+ - Specifies that SSH should only use the configured authentication
+ identity and certificate files (either the default files, or
+ those explicitly configured in the C(ssh_config) files or passed on
+ the ssh command-line), even if ssh-agent or a PKCS11Provider or
+ SecurityKeyProvider offers more identities.
+ type: bool
+ version_added: 8.2.0
user_known_hosts_file:
description:
- Sets the user known hosts file option.
@@ -84,12 +93,12 @@ options:
proxycommand:
description:
- Sets the C(ProxyCommand) option.
- - Mutually exclusive with I(proxyjump).
+ - Mutually exclusive with O(proxyjump).
type: str
proxyjump:
description:
- Sets the C(ProxyJump) option.
- - Mutually exclusive with I(proxycommand).
+ - Mutually exclusive with O(proxycommand).
type: str
version_added: 6.5.0
forward_agent:
@@ -97,17 +106,38 @@ options:
- Sets the C(ForwardAgent) option.
type: bool
version_added: 4.0.0
+ add_keys_to_agent:
+ description:
+ - Sets the C(AddKeysToAgent) option.
+ type: bool
+ version_added: 8.2.0
ssh_config_file:
description:
- SSH config file.
- - If I(user) and this option are not specified, C(/etc/ssh/ssh_config) is used.
- - Mutually exclusive with I(user).
+ - If O(user) and this option are not specified, C(/etc/ssh/ssh_config) is used.
+ - Mutually exclusive with O(user).
type: path
host_key_algorithms:
description:
- Sets the C(HostKeyAlgorithms) option.
type: str
version_added: 6.1.0
+ controlmaster:
+ description:
+ - Sets the C(ControlMaster) option.
+ choices: [ 'yes', 'no', 'ask', 'auto', 'autoask' ]
+ type: str
+ version_added: 8.1.0
+ controlpath:
+ description:
+ - Sets the C(ControlPath) option.
+ type: str
+ version_added: 8.1.0
+ controlpersist:
+ description:
+ - Sets the C(ControlPersist) option.
+ type: str
+ version_added: 8.1.0
requirements:
- paramiko
'''
@@ -177,6 +207,22 @@ from ansible_collections.community.general.plugins.module_utils._stormssh import
from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file
+def convert_bool(value):
+ if value is True:
+ return 'yes'
+ if value is False:
+ return 'no'
+ return None
+
+
+def fix_bool_str(value):
+ if value == 'True':
+ return 'yes'
+ if value == 'False':
+ return 'no'
+ return value
+
+
class SSHConfig(object):
def __init__(self, module):
self.module = module
@@ -213,20 +259,20 @@ class SSHConfig(object):
hostname=self.params.get('hostname'),
port=self.params.get('port'),
identity_file=self.params.get('identity_file'),
+ identities_only=convert_bool(self.params.get('identities_only')),
user=self.params.get('remote_user'),
strict_host_key_checking=self.params.get('strict_host_key_checking'),
user_known_hosts_file=self.params.get('user_known_hosts_file'),
proxycommand=self.params.get('proxycommand'),
proxyjump=self.params.get('proxyjump'),
host_key_algorithms=self.params.get('host_key_algorithms'),
+ forward_agent=convert_bool(self.params.get('forward_agent')),
+ add_keys_to_agent=convert_bool(self.params.get('add_keys_to_agent')),
+ controlmaster=self.params.get('controlmaster'),
+ controlpath=self.params.get('controlpath'),
+ controlpersist=fix_bool_str(self.params.get('controlpersist')),
)
- # Convert True / False to 'yes' / 'no' for usage in ssh_config
- if self.params['forward_agent'] is True:
- args['forward_agent'] = 'yes'
- if self.params['forward_agent'] is False:
- args['forward_agent'] = 'no'
-
config_changed = False
hosts_changed = []
hosts_change_diff = []
@@ -312,17 +358,23 @@ def main():
hostname=dict(type='str'),
host_key_algorithms=dict(type='str', no_log=False),
identity_file=dict(type='path'),
+ identities_only=dict(type='bool'),
port=dict(type='str'),
proxycommand=dict(type='str', default=None),
proxyjump=dict(type='str', default=None),
forward_agent=dict(type='bool'),
+ add_keys_to_agent=dict(type='bool'),
remote_user=dict(type='str'),
ssh_config_file=dict(default=None, type='path'),
state=dict(type='str', default='present', choices=['present', 'absent']),
strict_host_key_checking=dict(
+ type='str',
default=None,
choices=['yes', 'no', 'ask']
),
+ controlmaster=dict(type='str', default=None, choices=['yes', 'no', 'ask', 'auto', 'autoask']),
+ controlpath=dict(type='str', default=None),
+ controlpersist=dict(type='str', default=None),
user=dict(default=None, type='str'),
user_known_hosts_file=dict(type='str', default=None),
),
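The ssh_config options are written through the bundled stormssh helper, and the rendered ssh_config wants the literal strings yes/no where booleans are involved (this is what the removed inline conversion for forward_agent did). The new helpers generalize that: convert_bool() maps real booleans (leaving unset options as None), while fix_bool_str() is used for controlpersist, a free-form string that may carry a stringified boolean but must keep durations untouched. A small usage sketch (the option values are examples):

def convert_bool(value):
    # True/False/None from a bool option -> 'yes'/'no'/None for ssh_config
    if value is True:
        return 'yes'
    if value is False:
        return 'no'
    return None

def fix_bool_str(value):
    # controlpersist is a string option: normalize 'True'/'False', keep '10m' etc.
    if value == 'True':
        return 'yes'
    if value == 'False':
        return 'no'
    return value

print(convert_bool(True), convert_bool(None))      # yes None
print(fix_bool_str('True'), fix_bool_str('10m'))   # yes 10m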
diff --git a/ansible_collections/community/general/plugins/modules/stackdriver.py b/ansible_collections/community/general/plugins/modules/stackdriver.py
index cf7cb2f47..35b2b0dc1 100644
--- a/ansible_collections/community/general/plugins/modules/stackdriver.py
+++ b/ansible_collections/community/general/plugins/modules/stackdriver.py
@@ -10,6 +10,11 @@ __metaclass__ = type
DOCUMENTATION = '''
+deprecated:
+ removed_in: 9.0.0
+ why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
+ alternative: no known alternative at this point
+
module: stackdriver
short_description: Send code deploy and annotation events to stackdriver
description:
diff --git a/ansible_collections/community/general/plugins/modules/stacki_host.py b/ansible_collections/community/general/plugins/modules/stacki_host.py
index e286bc961..57440a24d 100644
--- a/ansible_collections/community/general/plugins/modules/stacki_host.py
+++ b/ansible_collections/community/general/plugins/modules/stacki_host.py
@@ -30,13 +30,13 @@ options:
type: str
stacki_user:
description:
- - Username for authenticating with Stacki API, but if not specified, the environment variable C(stacki_user) is used instead.
+ - Username for authenticating with Stacki API, but if not specified, the environment variable E(stacki_user) is used instead.
required: true
type: str
stacki_password:
description:
- Password for authenticating with Stacki API, but if not
- specified, the environment variable C(stacki_password) is used instead.
+ specified, the environment variable E(stacki_password) is used instead.
required: true
type: str
stacki_endpoint:
@@ -61,7 +61,7 @@ options:
type: str
force_install:
description:
- - Set value to C(true) to force node into install state if it already exists in stacki.
+ - Set value to V(true) to force node into install state if it already exists in stacki.
type: bool
default: false
state:
@@ -72,21 +72,21 @@ options:
default: present
appliance:
description:
- - Applicance to be used in host creation.
- - Required if I(state) is C(present) and host does not yet exist.
+ - Appliance to be used in host creation.
+ - Required if O(state=present) and host does not yet exist.
type: str
default: backend
rack:
description:
- Rack to be used in host creation.
- - Required if I(state) is C(present) and host does not yet exist.
+ - Required if O(state=present) and host does not yet exist.
type: int
default: 0
rank:
description:
- Rank to be used in host creation.
- In Stacki terminology, the rank is the position of the machine in a rack.
- - Required if I(state) is C(present) and host does not yet exist.
+ - Required if O(state=present) and host does not yet exist.
type: int
default: 0
network:
diff --git a/ansible_collections/community/general/plugins/modules/statsd.py b/ansible_collections/community/general/plugins/modules/statsd.py
index 65d33b709..8bc0f0b18 100644
--- a/ansible_collections/community/general/plugins/modules/statsd.py
+++ b/ansible_collections/community/general/plugins/modules/statsd.py
@@ -14,8 +14,8 @@ version_added: 2.1.0
description:
- The C(statsd) module sends metrics to StatsD.
- For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/).
- - Supported metric types are C(counter) and C(gauge).
- Currently unupported metric types are C(timer), C(set), and C(gaugedelta).
+ - Supported metric types are V(counter) and V(gauge).
+    Currently unsupported metric types are V(timer), V(set), and V(gaugedelta).
author: "Mark Mercado (@mamercad)"
requirements:
- statsd
@@ -30,7 +30,7 @@ options:
state:
type: str
description:
- - State of the check, only C(present) makes sense.
+ - State of the check, only V(present) makes sense.
choices: ["present"]
default: present
host:
@@ -42,7 +42,7 @@ options:
type: int
default: 8125
description:
- - The port on C(host) which StatsD is listening on.
+      - The port on O(host) on which StatsD is listening.
protocol:
type: str
default: udp
@@ -53,7 +53,7 @@ options:
type: float
default: 1.0
description:
- - Sender timeout, only applicable if C(protocol) is C(tcp).
+ - Sender timeout, only applicable if O(protocol) is V(tcp).
metric:
type: str
required: true
@@ -79,7 +79,7 @@ options:
type: bool
default: false
description:
- - If the metric is of type C(gauge), change the value by C(delta).
+ - If the metric is of type V(gauge), change the value by O(delta).
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/statusio_maintenance.py b/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
index 31b422453..e6b34b709 100644
--- a/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
+++ b/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
@@ -286,25 +286,24 @@ def create_maintenance(auth_headers, url, statuspage, host_ids,
returned_date, maintenance_notify_now,
maintenance_notify_72_hr, maintenance_notify_24_hr,
maintenance_notify_1_hr):
- returned_dates = [[x] for x in returned_date]
component_id = []
container_id = []
for val in host_ids:
component_id.append(val['component_id'])
container_id.append(val['container_id'])
+ infrastructure_id = [i + '-' + j for i, j in zip(component_id, container_id)]
try:
values = json.dumps({
"statuspage_id": statuspage,
- "components": component_id,
- "containers": container_id,
"all_infrastructure_affected": str(int(all_infrastructure_affected)),
+ "infrastructure_affected": infrastructure_id,
"automation": str(int(automation)),
"maintenance_name": title,
"maintenance_details": desc,
- "date_planned_start": returned_dates[0],
- "time_planned_start": returned_dates[1],
- "date_planned_end": returned_dates[2],
- "time_planned_end": returned_dates[3],
+ "date_planned_start": returned_date[0],
+ "time_planned_start": returned_date[1],
+ "date_planned_end": returned_date[2],
+ "time_planned_end": returned_date[3],
"maintenance_notify_now": str(int(maintenance_notify_now)),
"maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
"maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
diff --git a/ansible_collections/community/general/plugins/modules/sudoers.py b/ansible_collections/community/general/plugins/modules/sudoers.py
index fd8289b1c..a392b4adf 100644
--- a/ansible_collections/community/general/plugins/modules/sudoers.py
+++ b/ansible_collections/community/general/plugins/modules/sudoers.py
@@ -31,13 +31,13 @@ options:
description:
- The commands allowed by the sudoers rule.
- Multiple can be added by passing a list of commands.
- - Use C(ALL) for all commands.
+ - Use V(ALL) for all commands.
type: list
elements: str
group:
description:
- The name of the group for the sudoers rule.
- - This option cannot be used in conjunction with I(user).
+ - This option cannot be used in conjunction with O(user).
type: str
name:
required: true
@@ -45,6 +45,12 @@ options:
- The name of the sudoers rule.
- This will be used for the filename for the sudoers file managed by this rule.
type: str
+ noexec:
+ description:
+      - Whether a command is prevented from running further commands itself.
+ default: false
+ type: bool
+ version_added: 8.4.0
nopassword:
description:
- Whether a password will be required to run the sudo'd command.
@@ -83,13 +89,13 @@ options:
user:
description:
- The name of the user for the sudoers rule.
- - This option cannot be used in conjunction with I(group).
+ - This option cannot be used in conjunction with O(group).
type: str
validation:
description:
- - If C(absent), the sudoers rule will be added without validation.
- - If C(detect) and visudo is available, then the sudoers rule will be validated by visudo.
- - If C(required), visudo must be available to validate the sudoers rule.
+ - If V(absent), the sudoers rule will be added without validation.
+ - If V(detect) and visudo is available, then the sudoers rule will be validated by visudo.
+ - If V(required), visudo must be available to validate the sudoers rule.
type: str
default: detect
choices: [ absent, detect, required ]
@@ -143,6 +149,15 @@ EXAMPLES = '''
user: alice
commands: /usr/local/bin/upload
setenv: true
+
+- name: >-
+ Allow alice to sudo /usr/bin/less but prevent less from
+ running further commands itself
+ community.general.sudoers:
+ name: allow-alice-restricted-less
+ user: alice
+ commands: /usr/bin/less
+ noexec: true
'''
import os
@@ -162,6 +177,7 @@ class Sudoers(object):
self.user = module.params['user']
self.group = module.params['group']
self.state = module.params['state']
+ self.noexec = module.params['noexec']
self.nopassword = module.params['nopassword']
self.setenv = module.params['setenv']
self.host = module.params['host']
@@ -205,13 +221,15 @@ class Sudoers(object):
owner = '%{group}'.format(group=self.group)
commands_str = ', '.join(self.commands)
+ noexec_str = 'NOEXEC:' if self.noexec else ''
nopasswd_str = 'NOPASSWD:' if self.nopassword else ''
setenv_str = 'SETENV:' if self.setenv else ''
runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''
- return "{owner} {host}={runas}{nopasswd}{setenv} {commands}\n".format(
+ return "{owner} {host}={runas}{noexec}{nopasswd}{setenv} {commands}\n".format(
owner=owner,
host=self.host,
runas=runas_str,
+ noexec=noexec_str,
nopasswd=nopasswd_str,
setenv=setenv_str,
commands=commands_str
@@ -258,6 +276,10 @@ def main():
'name': {
'required': True,
},
+ 'noexec': {
+ 'type': 'bool',
+ 'default': False,
+ },
'nopassword': {
'type': 'bool',
'default': True,
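The new noexec flag only changes the rendered sudoers line by prepending a NOEXEC: tag ahead of the other tags; evaluating the same format string with hypothetical values shows the result:

rule = "{owner} {host}={runas}{noexec}{nopasswd}{setenv} {commands}\n".format(
    owner="alice",          # hypothetical user
    host="ALL",
    runas="",               # no runas configured
    noexec="NOEXEC:",       # noexec: true
    nopasswd="NOPASSWD:",   # nopassword defaults to true
    setenv="",
    commands="/usr/bin/less",
)
print(rule, end="")  # alice ALL=NOEXEC:NOPASSWD: /usr/bin/less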
diff --git a/ansible_collections/community/general/plugins/modules/supervisorctl.py b/ansible_collections/community/general/plugins/modules/supervisorctl.py
index e9df16108..e8d9c89a6 100644
--- a/ansible_collections/community/general/plugins/modules/supervisorctl.py
+++ b/ansible_collections/community/general/plugins/modules/supervisorctl.py
@@ -27,9 +27,8 @@ options:
type: str
description:
- The name of the supervisord program or group to manage.
- - The name will be taken as group name when it ends with a colon I(:)
- - Group support is only available in Ansible version 1.6 or later.
- - If I(name=all), all programs and program groups will be managed.
+      - The name will be taken as a group name when it ends with a colon V(:).
+ - If O(name=all), all programs and program groups will be managed.
required: true
config:
type: path
@@ -53,6 +52,13 @@ options:
- The desired state of program/group.
required: true
choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+ stop_before_removing:
+ type: bool
+ description:
+      - Use O(stop_before_removing=true) to stop the program/group before removing it.
+ required: false
+ default: false
+ version_added: 7.5.0
signal:
type: str
description:
@@ -62,9 +68,10 @@ options:
description:
- path to supervisorctl executable
notes:
- - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
- - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
- - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+ - When O(state=present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+ - When O(state=restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
+ - When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+ If the program/group is still running, the action will fail. If you want to stop the program/group before removing, use O(stop_before_removing=true).
requirements: [ "supervisorctl" ]
author:
- "Matt Wright (@mattupstate)"
@@ -121,6 +128,7 @@ def main():
password=dict(type='str', no_log=True),
supervisorctl_path=dict(type='path'),
state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
+ stop_before_removing=dict(type='bool', default=False),
signal=dict(type='str'),
)
@@ -136,6 +144,7 @@ def main():
is_group = True
name = name.rstrip(':')
state = module.params['state']
+ stop_before_removing = module.params.get('stop_before_removing')
config = module.params.get('config')
server_url = module.params.get('server_url')
username = module.params.get('username')
@@ -199,22 +208,27 @@ def main():
matched.append((process_name, status))
return matched
- def take_action_on_processes(processes, status_filter, action, expected_result):
+ def take_action_on_processes(processes, status_filter, action, expected_result, exit_module=True):
to_take_action_on = []
for process_name, status in processes:
if status_filter(status):
to_take_action_on.append(process_name)
if len(to_take_action_on) == 0:
+ if not exit_module:
+ return
module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
+ if not exit_module:
+ return
module.exit_json(changed=True)
for process_name in to_take_action_on:
rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
if '%s: %s' % (process_name, expected_result) not in out:
module.fail_json(msg=out)
- module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
+ if exit_module:
+ module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
if state == 'restarted':
rc, out, err = run_supervisorctl('update', check_rc=True)
@@ -230,6 +244,9 @@ def main():
if len(processes) == 0:
module.exit_json(changed=False, name=name, state=state)
+ if stop_before_removing:
+ take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped', exit_module=False)
+
if module.check_mode:
module.exit_json(changed=True)
run_supervisorctl('reread', check_rc=True)
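The exit_module flag added to take_action_on_processes() is what lets the new stop_before_removing pass run as a preliminary step: it stops whatever is RUNNING or STARTING and then falls through to the normal removal instead of ending the module run. A stripped-down sketch of that control flow, with invented process names and statuses and plain prints standing in for module.exit_json():

def take_action(processes, status_filter, action, exit_module=True):
    targets = [name for name, status in processes if status_filter(status)]
    if not targets:
        if not exit_module:
            return []      # nothing to do, but keep going with the next step
        print("exit: no change")
        return []
    print("%s -> %s" % (action, targets))
    if exit_module:
        print("exit: changed")
    return targets

procs = [("web", "RUNNING"), ("worker", "STOPPED")]
# stop_before_removing=true: stop running processes first, without exiting...
take_action(procs, lambda s: s in ("RUNNING", "STARTING"), "stop", exit_module=False)
# ...then the regular absent handling removes everything.
take_action(procs, lambda s: True, "remove")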
diff --git a/ansible_collections/community/general/plugins/modules/svc.py b/ansible_collections/community/general/plugins/modules/svc.py
index bd2eaeb22..b327ddfd6 100644
--- a/ansible_collections/community/general/plugins/modules/svc.py
+++ b/ansible_collections/community/general/plugins/modules/svc.py
@@ -31,11 +31,11 @@ options:
required: true
state:
description:
- - C(Started)/C(stopped) are idempotent actions that will not run
- commands unless necessary. C(restarted) will always bounce the
- svc (svc -t) and C(killed) will always bounce the svc (svc -k).
- C(reloaded) will send a sigusr1 (svc -1).
- C(once) will run a normally downed svc once (svc -o), not really
+ - V(started)/V(stopped) are idempotent actions that will not run
+ commands unless necessary. V(restarted) will always bounce the
+ svc (svc -t) and V(killed) will always bounce the svc (svc -k).
+ V(reloaded) will send a sigusr1 (svc -1).
+ V(once) will run a normally downed svc once (svc -o), not really
an idempotent operation.
type: str
choices: [ killed, once, reloaded, restarted, started, stopped ]
diff --git a/ansible_collections/community/general/plugins/modules/svr4pkg.py b/ansible_collections/community/general/plugins/modules/svr4pkg.py
index e8c410482..db9902c77 100644
--- a/ansible_collections/community/general/plugins/modules/svr4pkg.py
+++ b/ansible_collections/community/general/plugins/modules/svr4pkg.py
@@ -31,14 +31,14 @@ attributes:
options:
name:
description:
- - Package name, e.g. C(SUNWcsr)
+ - Package name, for example V(SUNWcsr).
required: true
type: str
state:
description:
- - Whether to install (C(present)), or remove (C(absent)) a package.
- - If the package is to be installed, then I(src) is required.
+ - Whether to install (V(present)), or remove (V(absent)) a package.
+ - If the package is to be installed, then O(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
@@ -46,17 +46,17 @@ options:
src:
description:
- - Specifies the location to install the package from. Required when I(state=present).
- - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
+ - Specifies the location to install the package from. Required when O(state=present).
+ - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. For example: V(somefile.pkg), V(/dir/with/pkgs), V(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.
type: str
proxy:
description:
- - HTTP[s] proxy to be used if I(src) is a URL.
+ - HTTP[s] proxy to be used if O(src) is a URL.
type: str
response_file:
description:
- - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
+ - Specifies the location of a response file to be used if package expects input on install.
required: false
type: str
zone:
diff --git a/ansible_collections/community/general/plugins/modules/swdepot.py b/ansible_collections/community/general/plugins/modules/swdepot.py
index c4660c70d..28a8ce314 100644
--- a/ansible_collections/community/general/plugins/modules/swdepot.py
+++ b/ansible_collections/community/general/plugins/modules/swdepot.py
@@ -36,7 +36,7 @@ options:
type: str
state:
description:
- - whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ - whether to install (V(present), V(latest)), or remove (V(absent)) a package.
required: true
choices: [ 'present', 'latest', 'absent']
type: str
diff --git a/ansible_collections/community/general/plugins/modules/swupd.py b/ansible_collections/community/general/plugins/modules/swupd.py
index efd7ca7c1..16738c8cb 100644
--- a/ansible_collections/community/general/plugins/modules/swupd.py
+++ b/ansible_collections/community/general/plugins/modules/swupd.py
@@ -50,8 +50,8 @@ options:
type: str
state:
description:
- - Indicates the desired (I)bundle state. C(present) ensures the bundle
- is installed while C(absent) ensures the (I)bundle is not installed.
+ - Indicates the desired (I)bundle state. V(present) ensures the bundle
+ is installed while V(absent) ensures the (I)bundle is not installed.
default: present
choices: [present, absent]
type: str
@@ -62,7 +62,7 @@ options:
default: false
url:
description:
- - Overrides both I(contenturl) and I(versionurl).
+ - Overrides both O(contenturl) and O(versionurl).
type: str
verify:
description:
diff --git a/ansible_collections/community/general/plugins/modules/sysrc.py b/ansible_collections/community/general/plugins/modules/sysrc.py
index 9652b629a..6780975d4 100644
--- a/ansible_collections/community/general/plugins/modules/sysrc.py
+++ b/ansible_collections/community/general/plugins/modules/sysrc.py
@@ -33,28 +33,28 @@ options:
required: true
value:
description:
- - The value to set when I(state=present).
- - The value to add when I(state=value_present).
- - The value to remove when I(state=value_absent).
+ - The value to set when O(state=present).
+ - The value to add when O(state=value_present).
+ - The value to remove when O(state=value_absent).
type: str
state:
description:
- - Use I(present) to add the variable.
- - Use I(absent) to remove the variable.
- - Use I(value_present) to add the value to the existing variable.
- - Use I(value_absent) to remove the value from the existing variable.
+ - Use V(present) to add the variable.
+ - Use V(absent) to remove the variable.
+ - Use V(value_present) to add the value to the existing variable.
+ - Use V(value_absent) to remove the value from the existing variable.
type: str
default: "present"
choices: [ absent, present, value_present, value_absent ]
path:
description:
- - Path to file to use instead of C(/etc/rc.conf).
+ - Path to file to use instead of V(/etc/rc.conf).
type: str
default: "/etc/rc.conf"
delim:
description:
- - Delimiter to be used instead of C( ).
- - Only used when I(state=value_present) or I(state=value_absent).
+ - Delimiter to be used instead of V(" ") (space).
+ - Only used when O(state=value_present) or O(state=value_absent).
default: " "
type: str
jail:
@@ -62,7 +62,7 @@ options:
- Name or ID of the jail to operate on.
type: str
notes:
- - The C(name) cannot contain periods as sysrc does not support OID style names.
+ - The O(name) cannot contain periods as sysrc does not support OID style names.
'''
EXAMPLES = r'''
@@ -222,7 +222,7 @@ def main():
# OID style names are not supported
if not re.match('^[a-zA-Z0-9_]+$', name):
module.fail_json(
- msg="Name may only contain alpha-numeric and underscore characters"
+ msg="Name may only contain alphanumeric and underscore characters"
)
value = module.params.pop('value')
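The name check above rejects anything outside C([a-zA-Z0-9_]), because sysrc does not support OID-style names containing periods. A quick standalone illustration of which names pass that regular expression (the candidate names are made up):

# Standalone illustration of the sysrc name check; example names are made up.
import re

NAME_RE = re.compile(r'^[a-zA-Z0-9_]+$')

for candidate in ('sshd_enable', 'gateway_enable', 'some.oid.style.name', 'bad-name'):
    verdict = 'accepted' if NAME_RE.match(candidate) else 'rejected'
    print('%-25s %s' % (candidate, verdict))
# sshd_enable and gateway_enable pass; the period and hyphen variants fail.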
diff --git a/ansible_collections/community/general/plugins/modules/sysupgrade.py b/ansible_collections/community/general/plugins/modules/sysupgrade.py
index ac80e0196..639fa345a 100644
--- a/ansible_collections/community/general/plugins/modules/sysupgrade.py
+++ b/ansible_collections/community/general/plugins/modules/sysupgrade.py
@@ -43,7 +43,7 @@ options:
fetch_only:
description:
- Fetch and verify files and create /bsd.upgrade but do not reboot.
- - Set to C(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples.
+ - Set to V(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples.
default: true
type: bool
installurl:
diff --git a/ansible_collections/community/general/plugins/modules/telegram.py b/ansible_collections/community/general/plugins/modules/telegram.py
index d13e90fd5..963c66353 100644
--- a/ansible_collections/community/general/plugins/modules/telegram.py
+++ b/ansible_collections/community/general/plugins/modules/telegram.py
@@ -20,7 +20,7 @@ short_description: Send notifications via telegram
description:
- Send notifications via telegram bot, to a verified group or user.
- - Also, the user may try to use any other telegram bot API method, if you specify I(api_method) argument.
+  - Also, the user may try to use any other telegram bot API method, if you specify the O(api_method) argument.
notes:
  - You will require a telegram account and create a telegram bot to use this module.
extends_documentation_fragment:
@@ -47,7 +47,7 @@ options:
type: dict
description:
- Any parameters for the method.
- - For reference to default method, C(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage).
+ - For reference to default method, V(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage).
version_added: 2.0.0
'''
diff --git a/ansible_collections/community/general/plugins/modules/terraform.py b/ansible_collections/community/general/plugins/modules/terraform.py
index f9f809220..5906657c6 100644
--- a/ansible_collections/community/general/plugins/modules/terraform.py
+++ b/ansible_collections/community/general/plugins/modules/terraform.py
@@ -21,7 +21,8 @@ attributes:
check_mode:
support: full
diff_mode:
- support: none
+ support: full
+ version_added: 8.3.0
options:
state:
choices: ['planned', 'present', 'absent']
@@ -55,7 +56,7 @@ options:
version_added: 3.0.0
workspace:
description:
- - The terraform workspace to work with. This sets the C(TF_WORKSPACE) environmental variable
+ - The terraform workspace to work with. This sets the E(TF_WORKSPACE) environmental variable
that is used to override workspace selection. For more information about workspaces
have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces).
type: str
@@ -83,7 +84,6 @@ options:
description:
- The path to a variables file for Terraform to fill into the TF
configurations. This can accept a list of paths to multiple variables files.
- - Up until Ansible 2.9, this option was usable as I(variables_file).
type: list
elements: path
aliases: [ 'variables_file' ]
@@ -91,18 +91,18 @@ options:
description:
- A group of key-values pairs to override template variables or those in variables files.
By default, only string and number values are allowed, which are passed on unquoted.
- - Support complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when I(complex_vars=true).
+      - Supports complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when O(complex_vars=true).
- Ansible integers or floats are mapped to terraform numbers.
- Ansible strings are mapped to terraform strings.
- Ansible dictionaries are mapped to terraform objects.
- Ansible lists are mapped to terraform lists.
- Ansible booleans are mapped to terraform booleans.
- - "B(Note) passwords passed as variables will be visible in the log output. Make sure to use I(no_log=true) in production!"
+ - "B(Note) passwords passed as variables will be visible in the log output. Make sure to use C(no_log=true) in production!"
type: dict
complex_vars:
description:
- Enable/disable capability to handle complex variable structures for C(terraform).
- - If C(true) the I(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform).
+ - If V(true) the O(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform).
Strings that are passed are correctly quoted.
- When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted.
type: bool
@@ -135,7 +135,7 @@ options:
type: bool
overwrite_init:
description:
- - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path).
+ - Run init even if C(.terraform/terraform.tfstate) already exists in O(project_path).
default: true
type: bool
version_added: '3.2.0'
@@ -165,7 +165,7 @@ options:
check_destroy:
description:
- Apply only when no resources are destroyed. Note that this only prevents "destroy" actions,
- but not "destroy and re-create" actions. This option is ignored when I(state=absent).
+ but not "destroy and re-create" actions. This option is ignored when O(state=absent).
type: bool
default: false
version_added: '3.3.0'
@@ -251,7 +251,7 @@ EXAMPLES = """
RETURN = """
outputs:
type: complex
- description: A dictionary of all the TF outputs by their assigned name. Use C(.outputs.MyOutputName.value) to access the value.
+ description: A dictionary of all the TF outputs by their assigned name. Use RV(ignore:outputs.MyOutputName.value) to access the value.
returned: on success
sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
contains:
@@ -325,7 +325,7 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i
for key, val in backend_config.items():
command.extend([
'-backend-config',
- shlex_quote('{0}={1}'.format(key, val))
+ '{0}={1}'.format(key, val)
])
if backend_config_files:
for f in backend_config_files:
@@ -376,7 +376,7 @@ def remove_workspace(bin_path, project_path, workspace):
_workspace_cmd(bin_path, project_path, 'delete', workspace)
-def build_plan(command, project_path, variables_args, state_file, targets, state, apply_args, plan_path=None):
+def build_plan(command, project_path, variables_args, state_file, targets, state, args, plan_path=None):
if plan_path is None:
f, plan_path = tempfile.mkstemp(suffix='.tfplan')
@@ -389,11 +389,15 @@ def build_plan(command, project_path, variables_args, state_file, targets, state
plan_command.append(c)
if state == "present":
- for a in apply_args:
+ for a in args:
local_command.remove(a)
for c in local_command[1:]:
plan_command.append(c)
+ if state == "absent":
+ for a in args:
+ plan_command.append(a)
+
plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path])
for t in targets:
@@ -429,6 +433,49 @@ def build_plan(command, project_path, variables_args, state_file, targets, state
))
+def get_diff(diff_output):
+ def get_tf_resource_address(e):
+ return e['resource']
+
+ diff_json_output = json.loads(diff_output)
+
+    # Ignore diff if resource_changes does not exist in tfplan
+ if 'resource_changes' in diff_json_output:
+        tf_resource_changes = diff_json_output['resource_changes']
+ else:
+ module.warn("Cannot find resource_changes in terraform plan, diff/check ignored")
+ return False, {}
+
+ diff_after = []
+ diff_before = []
+ changed = False
+    for item in tf_resource_changes:
+ item_change = item['change']
+ tf_before_state = {'resource': item['address'], 'change': item['change']['before']}
+ tf_after_state = {'resource': item['address'], 'change': item['change']['after']}
+
+ if item_change['actions'] == ['update'] or item_change['actions'] == ['delete', 'create']:
+ diff_before.append(tf_before_state)
+ diff_after.append(tf_after_state)
+ changed = True
+
+ if item_change['actions'] == ['delete']:
+ diff_before.append(tf_before_state)
+ changed = True
+
+ if item_change['actions'] == ['create']:
+ diff_after.append(tf_after_state)
+ changed = True
+
+ diff_before.sort(key=get_tf_resource_address)
+ diff_after.sort(key=get_tf_resource_address)
+
+ return changed, dict(
+ before=({'data': diff_before}),
+ after=({'data': diff_after}),
+ )
+
+
def main():
global module
module = AnsibleModule(
@@ -514,7 +561,7 @@ def main():
def format_args(vars):
if isinstance(vars, str):
- return '"{string}"'.format(string=vars.replace('\\', '\\\\').replace('"', '\\"'))
+ return '"{string}"'.format(string=vars.replace('\\', '\\\\').replace('"', '\\"')).replace('\n', '\\n')
elif isinstance(vars, bool):
if vars:
return 'true'
@@ -620,6 +667,23 @@ def main():
"Consider switching the 'check_destroy' to false to suppress this error")
command.append(plan_file)
+ result_diff = dict()
+ if module._diff or module.check_mode:
+ if state == 'absent':
+ plan_absent_args = ['-destroy']
+ plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
+ module.params.get('targets'), state, plan_absent_args, plan_file)
+ diff_command = [command[0], 'show', '-json', plan_file]
+ rc, diff_output, err = module.run_command(diff_command, check_rc=False, cwd=project_path)
+ changed, result_diff = get_diff(diff_output)
+ if rc != 0:
+ if workspace_ctx["current"] != workspace:
+ select_workspace(command[0], project_path, workspace_ctx["current"])
+ module.fail_json(msg=err.rstrip(), rc=rc, stdout=out,
+ stdout_lines=out.splitlines(), stderr=err,
+ stderr_lines=err.splitlines(),
+ cmd=' '.join(command))
+
if needs_application and not module.check_mode and state != 'planned':
rc, out, err = module.run_command(command, check_rc=False, cwd=project_path)
if rc != 0:
@@ -652,7 +716,18 @@ def main():
if state == 'absent' and workspace != 'default' and purge_workspace is True:
remove_workspace(command[0], project_path, workspace)
- module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
+ result = {
+ 'state': state,
+ 'workspace': workspace,
+ 'outputs': outputs,
+ 'stdout': out,
+ 'stderr': err,
+ 'command': ' '.join(command),
+ 'changed': changed,
+ 'diff': result_diff,
+ }
+
+ module.exit_json(**result)
if __name__ == '__main__':
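In diff or check mode the module now builds a plan (adding C(-destroy) when O(state=absent)), runs C(terraform show -json) on the plan file, and hands the output to get_diff(), which buckets every resource_changes entry by its actions list into before/after data. A standalone sketch of that classification step on a made-up plan document (summarize_plan and the sample resources are illustrative, not module API):

# Standalone sketch mirroring get_diff(): classify each resource change into
# before/after buckets and report whether anything would change.
import json

def summarize_plan(plan_json_text):
    plan = json.loads(plan_json_text)
    before, after, changed = [], [], False
    for item in plan.get('resource_changes', []):
        actions = item['change']['actions']
        entry_before = {'resource': item['address'], 'change': item['change']['before']}
        entry_after = {'resource': item['address'], 'change': item['change']['after']}
        if actions in (['update'], ['delete', 'create']):
            before.append(entry_before)
            after.append(entry_after)
            changed = True
        elif actions == ['delete']:
            before.append(entry_before)
            changed = True
        elif actions == ['create']:
            after.append(entry_after)
            changed = True
    return changed, {'before': {'data': before}, 'after': {'data': after}}

sample_plan = json.dumps({'resource_changes': [
    {'address': 'aws_instance.web',
     'change': {'actions': ['create'], 'before': None, 'after': {'ami': 'ami-123456'}}},
    {'address': 'aws_s3_bucket.logs',
     'change': {'actions': ['update'], 'before': {'acl': 'private'}, 'after': {'acl': 'public-read'}}},
]})
print(summarize_plan(sample_plan))  # changed=True plus the before/after buckets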
diff --git a/ansible_collections/community/general/plugins/modules/timezone.py b/ansible_collections/community/general/plugins/modules/timezone.py
index 05849e4bb..e027290e8 100644
--- a/ansible_collections/community/general/plugins/modules/timezone.py
+++ b/ansible_collections/community/general/plugins/modules/timezone.py
@@ -22,9 +22,6 @@ description:
On AIX, C(chtz) is used.
- Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed,
when not using a minimal installation like Alpine Linux).
- - As of Ansible 2.3 support was added for SmartOS and BSDs.
- - As of Ansible 2.4 support was added for macOS.
- - As of Ansible 2.9 support was added for AIX 6.1+
- Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails.
extends_documentation_fragment:
- community.general.attributes
@@ -53,8 +50,9 @@ options:
choices: [ local, UTC ]
notes:
- On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone
- - On AIX only Olson/tz database timezones are useable (POSIX is not supported).
- - An OS reboot is also required on AIX for the new timezone setting to take effect.
+ - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+ An OS reboot is also required on AIX for the new timezone setting to take effect.
+ Note that AIX 6.1+ is needed (OS level 61 or newer).
author:
- Shinichi TAMURA (@tmshn)
- Jasper Lievisse Adriaanse (@jasperla)
@@ -447,7 +445,7 @@ class NosystemdTimezone(Timezone):
filename: The name of the file to edit.
regexp: The regular expression to search with.
value: The line which will be inserted.
- key: For what key the file is being editted.
+ key: For what key the file is being edited.
"""
# Read the file
try:
@@ -725,7 +723,7 @@ class BSDTimezone(Timezone):
localtime_file = '/etc/localtime'
# Strategy 1:
- # If /etc/localtime does not exist, assum the timezone is UTC.
+ # If /etc/localtime does not exist, assume the timezone is UTC.
if not os.path.exists(localtime_file):
self.module.warn('Could not read /etc/localtime. Assuming UTC.')
return 'UTC'
diff --git a/ansible_collections/community/general/plugins/modules/udm_dns_record.py b/ansible_collections/community/general/plugins/modules/udm_dns_record.py
index 849c84a2d..99fe10c63 100644
--- a/ansible_collections/community/general/plugins/modules/udm_dns_record.py
+++ b/ansible_collections/community/general/plugins/modules/udm_dns_record.py
@@ -21,7 +21,7 @@ description:
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Univention
- - ipaddress (for I(type=ptr_record))
+ - ipaddress (for O(type=ptr_record))
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -48,21 +48,21 @@ options:
required: true
description:
- Corresponding DNS zone for this record, e.g. example.com.
- - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)).
+ - For PTR records this has to be the full reverse zone (for example V(1.1.192.in-addr.arpa)).
type:
type: str
required: true
description:
- - "Define the record type. C(host_record) is a A or AAAA record,
- C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
- is a SRV record and C(txt_record) is a TXT record."
- - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)."
+      - "Define the record type. V(host_record) is an A or AAAA record,
+ V(alias) is a CNAME, V(ptr_record) is a PTR record, V(srv_record)
+ is a SRV record and V(txt_record) is a TXT record."
+ - "The available choices are: V(host_record), V(alias), V(ptr_record), V(srv_record), V(txt_record)."
data:
type: dict
default: {}
description:
- - "Additional data for this record, e.g. ['a': '192.0.2.1'].
- Required if I(state=present)."
+ - "Additional data for this record, for example V({'a': '192.0.2.1'})."
+ - Required if O(state=present).
'''
diff --git a/ansible_collections/community/general/plugins/modules/udm_dns_zone.py b/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
index 19f24fa1c..387d5cc45 100644
--- a/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
+++ b/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
@@ -38,26 +38,26 @@ options:
required: true
description:
- Define if the zone is a forward or reverse DNS zone.
- - "The available choices are: C(forward_zone), C(reverse_zone)."
+ - "The available choices are: V(forward_zone), V(reverse_zone)."
zone:
type: str
required: true
description:
- - DNS zone name, e.g. C(example.com).
+ - DNS zone name, for example V(example.com).
aliases: [name]
nameserver:
type: list
elements: str
default: []
description:
- - List of appropriate name servers. Required if I(state=present).
+ - List of appropriate name servers. Required if O(state=present).
interfaces:
type: list
elements: str
default: []
description:
- List of interface IP addresses, on which the server should
- response this zone. Required if I(state=present).
+        respond for this zone. Required if O(state=present).
refresh:
type: int
diff --git a/ansible_collections/community/general/plugins/modules/udm_share.py b/ansible_collections/community/general/plugins/modules/udm_share.py
index 274391335..8ae243b3d 100644
--- a/ansible_collections/community/general/plugins/modules/udm_share.py
+++ b/ansible_collections/community/general/plugins/modules/udm_share.py
@@ -42,18 +42,17 @@ options:
host:
required: false
description:
- - Host FQDN (server which provides the share), e.g. C({{
- ansible_fqdn }}). Required if I(state=present).
+ - Host FQDN (server which provides the share), for example V({{ ansible_fqdn }}). Required if O(state=present).
type: str
path:
required: false
description:
- - Directory on the providing server, e.g. C(/home). Required if I(state=present).
+ - Directory on the providing server, for example V(/home). Required if O(state=present).
type: path
sambaName:
required: false
description:
- - Windows name. Required if I(state=present).
+ - Windows name. Required if O(state=present).
type: str
aliases: [ samba_name ]
ou:
diff --git a/ansible_collections/community/general/plugins/modules/udm_user.py b/ansible_collections/community/general/plugins/modules/udm_user.py
index 05c5ad359..dcbf0ec85 100644
--- a/ansible_collections/community/general/plugins/modules/udm_user.py
+++ b/ansible_collections/community/general/plugins/modules/udm_user.py
@@ -42,15 +42,15 @@ options:
type: str
firstname:
description:
- - First name. Required if I(state=present).
+ - First name. Required if O(state=present).
type: str
lastname:
description:
- - Last name. Required if I(state=present).
+ - Last name. Required if O(state=present).
type: str
password:
description:
- - Password. Required if I(state=present).
+ - Password. Required if O(state=present).
type: str
birthday:
description:
@@ -103,13 +103,13 @@ options:
description:
- "POSIX groups, the LDAP DNs of the groups will be found with the
LDAP filter for each group as $GROUP:
- C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ V((&(objectClass=posixGroup\\)(cn=$GROUP\\)\\))."
type: list
elements: str
home_share:
description:
- "Home NFS share. Must be a LDAP DN, e.g.
- C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ V(cn=home,cn=shares,ou=school,dc=example,dc=com)."
aliases: [ homeShare ]
type: str
home_share_path:
@@ -126,7 +126,7 @@ options:
elements: str
homedrive:
description:
- - Windows home drive, e.g. C("H:").
+ - Windows home drive, for example V("H:").
type: str
mail_alternative_address:
default: []
@@ -189,7 +189,7 @@ options:
primary_group:
description:
- Primary group. This must be the group LDAP DN.
- - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
+ - If not specified, it defaults to V(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
aliases: [ primaryGroup ]
type: str
profilepath:
@@ -224,7 +224,7 @@ options:
default: []
sambahome:
description:
- - Windows home path, e.g. C('\\$FQDN\$USERNAME').
+ - Windows home path, for example V('\\\\$FQDN\\$USERNAME').
type: str
scriptpath:
description:
@@ -253,7 +253,7 @@ options:
type: str
title:
description:
- - Title, e.g. C(Prof.).
+ - Title, for example V(Prof.).
type: str
unixhome:
description:
@@ -262,33 +262,33 @@ options:
type: str
userexpiry:
description:
- - Account expiry date, e.g. C(1999-12-31).
+ - Account expiry date, for example V(1999-12-31).
- If not specified, it defaults to the current day plus one year.
type: str
position:
default: ''
description:
- "Define the whole position of users object inside the LDAP tree,
- e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ for example V(cn=employee,cn=users,ou=school,dc=example,dc=com)."
type: str
update_password:
default: always
choices: [ always, on_create ]
description:
- - "C(always) will update passwords if they differ.
- C(on_create) will only set the password for newly created users."
+ - "V(always) will update passwords if they differ.
+ V(on_create) will only set the password for newly created users."
type: str
ou:
default: ''
description:
- - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ - "Organizational Unit inside the LDAP Base DN, for example V(school) for
LDAP OU C(ou=school,dc=example,dc=com)."
type: str
subpath:
default: 'cn=users'
description:
- - "LDAP subpath inside the organizational unit, e.g.
- C(cn=teachers,cn=users) for LDAP container
+ - "LDAP subpath inside the organizational unit, for example
+ V(cn=teachers,cn=users) for LDAP container
C(cn=teachers,cn=users,dc=example,dc=com)."
type: str
'''
@@ -302,7 +302,7 @@ EXAMPLES = '''
firstname: Foo
lastname: Bar
-- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+- name: Create a user with the DN uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com
community.general.udm_user:
name: foo
password: secure_password
@@ -312,7 +312,7 @@ EXAMPLES = '''
subpath: 'cn=teachers,cn=users'
# or define the position
-- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+- name: Create a user with the DN uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com
community.general.udm_user:
name: foo
password: secure_password
diff --git a/ansible_collections/community/general/plugins/modules/ufw.py b/ansible_collections/community/general/plugins/modules/ufw.py
index 45c98fd63..5d187793b 100644
--- a/ansible_collections/community/general/plugins/modules/ufw.py
+++ b/ansible_collections/community/general/plugins/modules/ufw.py
@@ -35,10 +35,10 @@ attributes:
options:
state:
description:
- - C(enabled) reloads firewall and enables firewall on boot.
- - C(disabled) unloads firewall and disables firewall on boot.
- - C(reloaded) reloads firewall.
- - C(reset) disables and resets firewall to installation defaults.
+ - V(enabled) reloads firewall and enables firewall on boot.
+ - V(disabled) unloads firewall and disables firewall on boot.
+ - V(reloaded) reloads firewall.
+ - V(reset) disables and resets firewall to installation defaults.
type: str
choices: [ disabled, enabled, reloaded, reset ]
default:
@@ -50,7 +50,7 @@ options:
direction:
description:
- Select direction for a rule or default policy command. Mutually
- exclusive with I(interface_in) and I(interface_out).
+ exclusive with O(interface_in) and O(interface_out).
type: str
choices: [ in, incoming, out, outgoing, routed ]
logging:
@@ -62,24 +62,24 @@ options:
description:
- Insert the corresponding rule as rule number NUM.
- Note that ufw numbers rules starting with 1.
- - If I(delete=true) and a value is provided for I(insert),
- then I(insert) is ignored.
+ - If O(delete=true) and a value is provided for O(insert),
+ then O(insert) is ignored.
type: int
insert_relative_to:
description:
- - Allows to interpret the index in I(insert) relative to a position.
- - C(zero) interprets the rule number as an absolute index (i.e. 1 is
+ - Allows to interpret the index in O(insert) relative to a position.
+ - V(zero) interprets the rule number as an absolute index (i.e. 1 is
the first rule).
- - C(first-ipv4) interprets the rule number relative to the index of the
+ - V(first-ipv4) interprets the rule number relative to the index of the
first IPv4 rule, or relative to the position where the first IPv4 rule
would be if there is currently none.
- - C(last-ipv4) interprets the rule number relative to the index of the
+ - V(last-ipv4) interprets the rule number relative to the index of the
last IPv4 rule, or relative to the position where the last IPv4 rule
would be if there is currently none.
- - C(first-ipv6) interprets the rule number relative to the index of the
+ - V(first-ipv6) interprets the rule number relative to the index of the
first IPv6 rule, or relative to the position where the first IPv6 rule
would be if there is currently none.
- - C(last-ipv6) interprets the rule number relative to the index of the
+ - V(last-ipv6) interprets the rule number relative to the index of the
last IPv6 rule, or relative to the position where the last IPv6 rule
would be if there is currently none.
type: str
@@ -130,32 +130,32 @@ options:
delete:
description:
- Delete rule.
- - If I(delete=true) and a value is provided for I(insert),
- then I(insert) is ignored.
+ - If O(delete=true) and a value is provided for O(insert),
+ then O(insert) is ignored.
type: bool
default: false
interface:
description:
- Specify interface for the rule. The direction (in or out) used
- for the interface depends on the value of I(direction). See
- I(interface_in) and I(interface_out) for routed rules that needs
+ for the interface depends on the value of O(direction). See
+        O(interface_in) and O(interface_out) for routed rules that need
to supply both an input and output interface. Mutually
- exclusive with I(interface_in) and I(interface_out).
+ exclusive with O(interface_in) and O(interface_out).
type: str
aliases: [ if ]
interface_in:
description:
- Specify input interface for the rule. This is mutually
- exclusive with I(direction) and I(interface). However, it is
- compatible with I(interface_out) for routed rules.
+ exclusive with O(direction) and O(interface). However, it is
+ compatible with O(interface_out) for routed rules.
type: str
aliases: [ if_in ]
version_added: '0.2.0'
interface_out:
description:
- Specify output interface for the rule. This is mutually
- exclusive with I(direction) and I(interface). However, it is
- compatible with I(interface_in) for routed rules.
+ exclusive with O(direction) and O(interface). However, it is
+ compatible with O(interface_in) for routed rules.
type: str
aliases: [ if_out ]
version_added: '0.2.0'
diff --git a/ansible_collections/community/general/plugins/modules/urpmi.py b/ansible_collections/community/general/plugins/modules/urpmi.py
index 34e099e4d..75c0af90f 100644
--- a/ansible_collections/community/general/plugins/modules/urpmi.py
+++ b/ansible_collections/community/general/plugins/modules/urpmi.py
@@ -16,7 +16,7 @@ DOCUMENTATION = '''
module: urpmi
short_description: Urpmi manager
description:
- - Manages packages with I(urpmi) (such as for Mageia or Mandriva)
+ - Manages packages with C(urpmi) (such as for Mageia or Mandriva)
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -45,19 +45,19 @@ options:
default: false
no_recommends:
description:
- - Corresponds to the C(--no-recommends) option for I(urpmi).
+ - Corresponds to the C(--no-recommends) option for C(urpmi).
type: bool
default: true
force:
description:
- Assume "yes" is the answer to any question urpmi has to ask.
- Corresponds to the C(--force) option for I(urpmi).
+ Corresponds to the C(--force) option for C(urpmi).
type: bool
default: true
root:
description:
- Specifies an alternative install root, relative to which all packages will be installed.
- Corresponds to the C(--root) option for I(urpmi).
+ Corresponds to the C(--root) option for C(urpmi).
aliases: [ installroot ]
type: str
author:
diff --git a/ansible_collections/community/general/plugins/modules/usb_facts.py b/ansible_collections/community/general/plugins/modules/usb_facts.py
new file mode 100644
index 000000000..340c71ee5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/usb_facts.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2024, Max Maxopoly <max@dermax.org>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: usb_facts
+short_description: Allows listing information about USB devices
+version_added: 8.5.0
+description:
+ - Allows retrieving information about available USB devices through C(lsusb).
+author:
+ - Max Maxopoly (@maxopoly)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+requirements:
+ - lsusb binary on PATH (usually installed through the package usbutils and preinstalled on many systems)
+'''
+
+EXAMPLES = '''
+- name: Get information about USB devices
+ community.general.usb_facts:
+
+- name: Print information about USB devices
+ ansible.builtin.debug:
+ msg: "On bus {{ item.bus }} device {{ item.device }} with id {{ item.id }} is {{ item.name }}"
+ loop: "{{ ansible_facts.usb_devices }}"
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Dictionary containing details of connected USB devices.
+ returned: always
+ type: dict
+ contains:
+ usb_devices:
+ description: A list of USB devices available.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ bus:
+          description: The bus the USB device is connected to.
+ returned: always
+ type: str
+ sample: "001"
+ device:
+ description: The device number occupied on the bus.
+ returned: always
+ type: str
+ sample: "002"
+ id:
+ description: ID of the USB device.
+ returned: always
+ type: str
+ sample: "1d6b:0002"
+ name:
+ description: Human readable name of the device.
+ returned: always
+ type: str
+ sample: Linux Foundation 2.0 root hub
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_lsusb(module, lsusb_path):
+ rc, stdout, stderr = module.run_command(lsusb_path, check_rc=True)
+ regex = re.compile(r'^Bus (\d{3}) Device (\d{3}): ID ([0-9a-f]{4}:[0-9a-f]{4}) (.*)$')
+ usb_devices = []
+ for line in stdout.splitlines():
+ match = re.match(regex, line)
+ if not match:
+ module.fail_json(msg="failed to parse unknown lsusb output %s" % (line), stdout=stdout, stderr=stderr)
+ current_device = {
+ 'bus': match.group(1),
+ 'device': match.group(2),
+ 'id': match.group(3),
+ 'name': match.group(4)
+ }
+ usb_devices.append(current_device)
+ return_value = {
+ "usb_devices": usb_devices
+ }
+ module.exit_json(msg="parsed %s USB devices" % (len(usb_devices)), stdout=stdout, stderr=stderr, ansible_facts=return_value)
+
+
+def main():
+ module = AnsibleModule(
+ {},
+ supports_check_mode=True
+ )
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C')
+
+ lsusb_path = module.get_bin_path('lsusb', required=True)
+ parse_lsusb(module, lsusb_path)
+
+
+if __name__ == '__main__':
+ main()
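The new module's parsing is a single regular expression over C(lsusb) output; every line must match C(Bus NNN Device NNN: ID vvvv:pppp Name) or the module fails. A self-contained sketch of that parsing on canned sample lines (the sample output is typical lsusb text, not captured from a live system):

# Self-contained sketch of the lsusb parsing used by usb_facts; sample lines
# are canned examples of typical lsusb output.
import re

LSUSB_RE = re.compile(r'^Bus (\d{3}) Device (\d{3}): ID ([0-9a-f]{4}:[0-9a-f]{4}) (.*)$')

sample_output = "\n".join([
    "Bus 001 Device 002: ID 8087:0024 Intel Corp. Integrated Rate Matching Hub",
    "Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub",
])

devices = []
for line in sample_output.splitlines():
    match = LSUSB_RE.match(line)
    if not match:
        raise ValueError("unparsable lsusb line: %s" % line)
    devices.append({'bus': match.group(1), 'device': match.group(2),
                    'id': match.group(3), 'name': match.group(4)})

print(devices)  # two dicts shaped like the usb_devices fact entries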
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_location.py b/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
index c22de7b92..736f564d5 100644
--- a/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
@@ -170,7 +170,7 @@ result:
description: The list of the denied network names
type: list
hot_standby:
- description: Use hot standy
+ description: Use hot standby
type: bool
path:
description: Path name
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py b/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
index 58a32107b..4e4ba9b13 100644
--- a/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
@@ -88,7 +88,7 @@ result:
description: The list of the denied network names
type: list
hot_standby:
- description: Use hot standy
+ description: Use hot standby
type: bool
path:
description: Path name
diff --git a/ansible_collections/community/general/plugins/modules/vdo.py b/ansible_collections/community/general/plugins/modules/vdo.py
index f1ea40e2e..8b0e74596 100644
--- a/ansible_collections/community/general/plugins/modules/vdo.py
+++ b/ansible_collections/community/general/plugins/modules/vdo.py
@@ -57,7 +57,7 @@ options:
activated:
description:
- The "activate" status for a VDO volume. If this is set
- to C(false), the VDO volume cannot be started, and it will
+ to V(false), the VDO volume cannot be started, and it will
not start on system startup. However, on initial
creation, a VDO volume with "activated" set to "off"
will be running, until stopped. This is the default
diff --git a/ansible_collections/community/general/plugins/modules/vertica_info.py b/ansible_collections/community/general/plugins/modules/vertica_info.py
index 3106be3b3..93ccc6844 100644
--- a/ansible_collections/community/general/plugins/modules/vertica_info.py
+++ b/ansible_collections/community/general/plugins/modules/vertica_info.py
@@ -15,8 +15,6 @@ module: vertica_info
short_description: Gathers Vertica database facts
description:
- Gathers Vertica database information.
- - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
diff --git a/ansible_collections/community/general/plugins/modules/vertica_role.py b/ansible_collections/community/general/plugins/modules/vertica_role.py
index 704594a12..a1ef40c7a 100644
--- a/ansible_collections/community/general/plugins/modules/vertica_role.py
+++ b/ansible_collections/community/general/plugins/modules/vertica_role.py
@@ -36,7 +36,7 @@ options:
type: str
state:
description:
- - Whether to create C(present), drop C(absent) or lock C(locked) a role.
+      - Whether to create V(present) or drop V(absent) a role.
choices: ['present', 'absent']
default: present
type: str
diff --git a/ansible_collections/community/general/plugins/modules/vertica_schema.py b/ansible_collections/community/general/plugins/modules/vertica_schema.py
index 01f8f721e..95e434ef3 100644
--- a/ansible_collections/community/general/plugins/modules/vertica_schema.py
+++ b/ansible_collections/community/general/plugins/modules/vertica_schema.py
@@ -50,7 +50,7 @@ options:
type: str
state:
description:
- - Whether to create C(present), or drop C(absent) a schema.
+ - Whether to create V(present), or drop V(absent) a schema.
default: present
choices: ['present', 'absent']
type: str
diff --git a/ansible_collections/community/general/plugins/modules/vertica_user.py b/ansible_collections/community/general/plugins/modules/vertica_user.py
index a6a5b5951..7a62bec44 100644
--- a/ansible_collections/community/general/plugins/modules/vertica_user.py
+++ b/ansible_collections/community/general/plugins/modules/vertica_user.py
@@ -8,7 +8,7 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
+DOCUMENTATION = r'''
---
module: vertica_user
short_description: Adds or removes Vertica database users and assigns roles
@@ -44,7 +44,7 @@ options:
- The user's password encrypted by the MD5 algorithm.
- The password must be generated with the format C("md5" + md5[password + username]),
resulting in a total of 35 characters. An easy way to do this is by querying
- the Vertica database with select 'md5'||md5('<user_password><user_name>').
+ the Vertica database with select V('md5'||md5('<user_password><user_name>'\)).
type: str
expired:
description:
@@ -53,7 +53,7 @@ options:
ldap:
description:
- Set to true if users are authenticated via LDAP.
- - The user will be created with password expired and set to I($ldap$).
+ - The user will be created with password expired and set to V($ldap$).
type: bool
roles:
description:
@@ -62,7 +62,7 @@ options:
type: str
state:
description:
- - Whether to create C(present), drop C(absent) or lock C(locked) a user.
+ - Whether to create (V(present)), drop (V(absent)), or lock (V(locked)) a user.
choices: ['present', 'absent', 'locked']
default: present
type: str
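The documented hash format is the literal prefix C(md5) followed by the 32-character MD5 hex digest of the plain-text password concatenated with the username, which is what the quoted SQL expression computes and why the result is 35 characters long. The same value can be produced locally; a minimal sketch (the credentials are placeholders):

# Minimal sketch of the documented "md5" + md5(password + username) format;
# the example credentials are placeholders.
import hashlib

def vertica_md5_password(password, username):
    digest = hashlib.md5((password + username).encode('utf-8')).hexdigest()
    return 'md5' + digest   # 3 + 32 = 35 characters

hashed = vertica_md5_password('secret', 'dbadmin')
print(hashed, len(hashed))  # prints the hash and 35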
diff --git a/ansible_collections/community/general/plugins/modules/vmadm.py b/ansible_collections/community/general/plugins/modules/vmadm.py
index 56ade17e4..bfe614837 100644
--- a/ansible_collections/community/general/plugins/modules/vmadm.py
+++ b/ansible_collections/community/general/plugins/modules/vmadm.py
@@ -39,7 +39,7 @@ options:
choices: [ joyent, joyent-minimal, lx, kvm, bhyve ]
default: joyent
description:
- - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0.
+ - Type of virtual machine. The V(bhyve) option was added in community.general 0.2.0.
type: str
boot:
required: false
@@ -50,7 +50,7 @@ options:
required: false
description:
- Sets a limit on the amount of CPU time that can be used by a VM.
- Use C(0) for no cap.
+ Use V(0) for no cap.
type: int
cpu_shares:
required: false
@@ -95,7 +95,7 @@ options:
docker:
required: false
description:
- - Docker images need this flag enabled along with the I(brand) set to C(lx).
+ - Docker images need this flag enabled along with the O(brand) set to C(lx).
type: bool
filesystems:
required: false
@@ -147,8 +147,8 @@ options:
internal_metadata_namespace:
required: false
description:
- - List of namespaces to be set as I(internal_metadata-only); these namespaces
- will come from I(internal_metadata) rather than I(customer_metadata).
+ - List of namespaces to be set as C(internal_metadata-only); these namespaces
+ will come from O(internal_metadata) rather than O(customer_metadata).
type: str
kernel_version:
required: false
@@ -164,7 +164,7 @@ options:
required: false
description:
- Resolvers in C(/etc/resolv.conf) will be updated when updating
- the I(resolvers) property.
+ the O(resolvers) property.
type: bool
max_locked_memory:
required: false
@@ -263,11 +263,11 @@ options:
choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
default: running
description:
- - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
- operate on a VM that is currently provisioned. C(present) means that the VM will be
- created if it was absent, and that it will be in a running state. C(absent) will
+ - States for the VM to be in. Please note that V(present), V(stopped) and V(restarted)
+ operate on a VM that is currently provisioned. V(present) means that the VM will be
+ created if it was absent, and that it will be in a running state. V(absent) will
shutdown the zone before removing it.
- C(stopped) means the zone will be created if it doesn't exist already, before shutting
+ V(stopped) means the zone will be created if it does not exist already, before shutting
it down.
type: str
tmpfs:
@@ -278,7 +278,7 @@ options:
uuid:
required: false
description:
- - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
+ - UUID of the VM. Can either be a full UUID or V(*) for all VMs.
type: str
vcpus:
required: false
@@ -309,8 +309,8 @@ options:
vnc_port:
required: false
description:
- - TCP port to listen of the VNC server. Or set C(0) for random,
- or C(-1) to disable.
+      - TCP port for the VNC server to listen on. Set V(0) for random,
+ or V(-1) to disable.
type: int
zfs_data_compression:
required: false
@@ -354,8 +354,6 @@ options:
description:
- ZFS pool the VM's zone dataset will be created in.
type: str
-requirements:
- - python >= 2.6
'''
EXAMPLES = '''
diff --git a/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py b/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py
index a51d454d9..93c4811af 100644
--- a/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py
+++ b/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py
@@ -38,11 +38,11 @@ options:
elements: str
baseuri:
description:
- - Base URI of OOB controller. Must include this or I(ioms).
+ - Base URI of OOB controller. Must include this or O(ioms).
type: str
ioms:
description:
- - List of IOM FQDNs for the enclosure. Must include this or I(baseuri).
+ - List of IOM FQDNs for the enclosure. Must include this or O(baseuri).
type: list
elements: str
username:
@@ -65,7 +65,7 @@ options:
resource_id:
required: false
description:
- - ID of the component to modify, such as C(Enclosure), C(IOModuleAFRU), C(PowerSupplyBFRU), C(FanExternalFRU3), or C(FanInternalFRU).
+ - ID of the component to modify, such as V(Enclosure), V(IOModuleAFRU), V(PowerSupplyBFRU), V(FanExternalFRU3), or V(FanInternalFRU).
type: str
version_added: 5.4.0
update_image_uri:
diff --git a/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py b/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py
index 038e1a72d..03ae67fcf 100644
--- a/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py
+++ b/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py
@@ -33,11 +33,11 @@ options:
elements: str
baseuri:
description:
- - Base URI of OOB controller. Must include this or I(ioms).
+ - Base URI of OOB controller. Must include this or O(ioms).
type: str
ioms:
description:
- - List of IOM FQDNs for the enclosure. Must include this or I(baseuri).
+ - List of IOM FQDNs for the enclosure. Must include this or O(baseuri).
type: list
elements: str
username:
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_app.py b/ansible_collections/community/general/plugins/modules/webfaction_app.py
index 7a4702675..81bfc8b68 100644
--- a/ansible_collections/community/general/plugins/modules/webfaction_app.py
+++ b/ansible_collections/community/general/plugins/modules/webfaction_app.py
@@ -19,6 +19,12 @@ __metaclass__ = type
DOCUMENTATION = '''
---
+
+deprecated:
+ removed_in: 9.0.0
+ why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
+ alternative: no known alternative at this point
+
module: webfaction_app
short_description: Add or remove applications on a Webfaction host
description:
@@ -27,9 +33,9 @@ author: Quentin Stafford-Fraser (@quentinsf)
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
- - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
+ your host, you may want to add C(serial=1) to the plays.
+ - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
extends_documentation_fragment:
- community.general.attributes
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_db.py b/ansible_collections/community/general/plugins/modules/webfaction_db.py
index c4742cb21..5428de5b6 100644
--- a/ansible_collections/community/general/plugins/modules/webfaction_db.py
+++ b/ansible_collections/community/general/plugins/modules/webfaction_db.py
@@ -16,6 +16,12 @@ __metaclass__ = type
DOCUMENTATION = '''
---
+
+deprecated:
+ removed_in: 9.0.0
+ why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
+ alternative: no known alternative at this point
+
module: webfaction_db
short_description: Add or remove a database on Webfaction
description:
@@ -24,9 +30,9 @@ author: Quentin Stafford-Fraser (@quentinsf)
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
- - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
+ your host, you may want to add C(serial=1) to the plays.
+ - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
extends_documentation_fragment:
- community.general.attributes
attributes:
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_domain.py b/ansible_collections/community/general/plugins/modules/webfaction_domain.py
index 9bffec3cd..4c87a539a 100644
--- a/ansible_collections/community/general/plugins/modules/webfaction_domain.py
+++ b/ansible_collections/community/general/plugins/modules/webfaction_domain.py
@@ -13,19 +13,25 @@ __metaclass__ = type
DOCUMENTATION = '''
---
+
+deprecated:
+ removed_in: 9.0.0
+ why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
+ alternative: no known alternative at this point
+
module: webfaction_domain
short_description: Add or remove domains and subdomains on Webfaction
description:
- Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
notes:
- - If you are I(deleting) domains by using I(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
- If you don't specify subdomains, the domain will be deleted.
+ - If you are I(deleting) domains by using O(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+ If you do not specify subdomains, the domain will be deleted.
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
- - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
+ your host, you may want to add C(serial=1) to the plays.
+ - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
extends_documentation_fragment:
- community.general.attributes
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py b/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
index 2b543c5b1..119dfd283 100644
--- a/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
+++ b/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
@@ -13,6 +13,12 @@ __metaclass__ = type
DOCUMENTATION = '''
---
+
+deprecated:
+ removed_in: 9.0.0
+ why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
+ alternative: no known alternative at this point
+
module: webfaction_mailbox
short_description: Add or remove mailboxes on Webfaction
description:
@@ -21,9 +27,9 @@ author: Quentin Stafford-Fraser (@quentinsf)
notes:
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
- - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
+ your host, you may want to add C(serial=1) to the plays.
+ - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
extends_documentation_fragment:
- community.general.attributes
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_site.py b/ansible_collections/community/general/plugins/modules/webfaction_site.py
index 385f55211..7795c45fe 100644
--- a/ansible_collections/community/general/plugins/modules/webfaction_site.py
+++ b/ansible_collections/community/general/plugins/modules/webfaction_site.py
@@ -13,20 +13,26 @@ __metaclass__ = type
DOCUMENTATION = '''
---
+
+deprecated:
+ removed_in: 9.0.0
+ why: the endpoints this module relies on do not exist any more and do not resolve to IPs in DNS.
+ alternative: no known alternative at this point
+
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
notes:
- - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you do not need to know the IP
address. You can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
- - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you do not specify C(localhost) as
+ your host, you may want to add C(serial=1) to the plays.
+ - See L(the webfaction API, https://docs.webfaction.com/xmlrpc-api/) for more info.
extends_documentation_fragment:
- community.general.attributes
diff --git a/ansible_collections/community/general/plugins/modules/xattr.py b/ansible_collections/community/general/plugins/modules/xattr.py
index 0b44fdaad..7a5f3b431 100644
--- a/ansible_collections/community/general/plugins/modules/xattr.py
+++ b/ansible_collections/community/general/plugins/modules/xattr.py
@@ -27,7 +27,6 @@ options:
path:
description:
- The full path of the file/object to get the facts of.
- - Before 2.3 this option was only usable as I(name).
type: path
required: true
aliases: [ name ]
@@ -42,27 +41,25 @@ options:
type: str
value:
description:
- - The value to set the named name/key to, it automatically sets the I(state) to C(present).
+      - The value to set the named name/key to; it automatically sets the O(state) to V(present).
type: str
state:
description:
- defines which state you want to do.
- C(read) retrieves the current value for a I(key) (default)
- C(present) sets I(path) to C(value), default if value is set
- C(all) dumps all data
- C(keys) retrieves all keys
- C(absent) deletes the key
+ V(read) retrieves the current value for a O(key) (default)
+ V(present) sets O(path) to O(value), default if value is set
+ V(all) dumps all data
+ V(keys) retrieves all keys
+ V(absent) deletes the key
type: str
choices: [ absent, all, keys, present, read ]
default: read
follow:
description:
- - If C(true), dereferences symlinks and sets/gets attributes on symlink target,
+ - If V(true), dereferences symlinks and sets/gets attributes on symlink target,
otherwise acts on symlink itself.
type: bool
default: true
-notes:
- - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
author:
- Brian Coca (@bcoca)
'''
diff --git a/ansible_collections/community/general/plugins/modules/xbps.py b/ansible_collections/community/general/plugins/modules/xbps.py
index 1fea5b384..bcbbb3f02 100644
--- a/ansible_collections/community/general/plugins/modules/xbps.py
+++ b/ansible_collections/community/general/plugins/modules/xbps.py
@@ -62,7 +62,7 @@ options:
- Whether or not to upgrade the xbps package when necessary.
Before installing new packages,
xbps requires the user to update the xbps package itself.
- Thus when this option is set to C(false),
+ Thus when this option is set to V(false),
upgrades and installations will fail when xbps is not up to date.
type: bool
default: true
diff --git a/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py b/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py
index 494ea061e..1e77d0f8d 100644
--- a/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py
+++ b/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py
@@ -592,8 +592,9 @@ class XCCRedfishUtils(RedfishUtils):
def raw_post_resource(self, resource_uri, request_body):
if resource_uri is None:
return {'ret': False, 'msg': "resource_uri is missing"}
+ resource_uri_has_actions = True
if '/Actions/' not in resource_uri:
- return {'ret': False, 'msg': "Bad uri %s. Keyword /Actions/ should be included in uri" % resource_uri}
+ resource_uri_has_actions = False
if request_body is None:
return {'ret': False, 'msg': "request_body is missing"}
# get action base uri data for further checking
@@ -602,7 +603,10 @@ class XCCRedfishUtils(RedfishUtils):
if response['ret'] is False:
return response
if 'Actions' not in response['data']:
- return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri}
+ if resource_uri_has_actions:
+ return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri}
+ else:
+ response['data']['Actions'] = {}
        # check resource_uri with target uri found in action base uri data
action_found = False
@@ -634,7 +638,7 @@ class XCCRedfishUtils(RedfishUtils):
else:
action_target_uri_list.append(response['data']['Actions']['Oem'][key]['target'])
- if not action_found:
+ if not action_found and resource_uri_has_actions:
return {'ret': False,
'msg': 'Specified resource_uri is not a supported action target uri, please specify a supported target uri instead. Supported uri: %s'
% (str(action_target_uri_list))}
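As a hedged sketch of what the relaxed check above enables, a task that POSTs to a resource URI without /Actions/; the Raw category and PostResource command names are assumptions, and the BMC address, credentials, URI, and body are placeholders:

- name: POST a request body to a resource URI that does not contain /Actions/
  community.general.xcc_redfish_command:
    category: Raw            # assumed category name
    command: PostResource    # assumed command name
    baseuri: "{{ bmc_address }}"
    username: "{{ bmc_username }}"
    password: "{{ bmc_password }}"
    resource_uri: /redfish/v1/Systems/1/LogServices/PlatformLog
    request_body: {}         # placeholder body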
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_facts.py b/ansible_collections/community/general/plugins/modules/xenserver_facts.py
index 9924c4a9e..685522f49 100644
--- a/ansible_collections/community/general/plugins/modules/xenserver_facts.py
+++ b/ansible_collections/community/general/plugins/modules/xenserver_facts.py
@@ -135,7 +135,7 @@ def change_keys(recs, key='uuid', filter_func=None):
for param_name, param_value in rec.items():
# param_value may be of type xmlrpc.client.DateTime,
- # which is not simply convertable to str.
+ # which is not simply convertible to str.
# Use 'value' attr to get the str value,
# following an example in xmlrpc.client.DateTime document
if hasattr(param_value, "value"):
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest.py b/ansible_collections/community/general/plugins/modules/xenserver_guest.py
index 7659ee2ae..110bc8875 100644
--- a/ansible_collections/community/general/plugins/modules/xenserver_guest.py
+++ b/ansible_collections/community/general/plugins/modules/xenserver_guest.py
@@ -25,26 +25,25 @@ notes:
Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
-- 'If no scheme is specified in I(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+- 'If no scheme is specified in O(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
-- 'To use C(https://) scheme for I(hostname) you have to either import host certificate to your OS certificate store or use I(validate_certs): C(false)
+- 'To use C(https://) scheme for O(hostname) you have to either import host certificate to your OS certificate store or use O(validate_certs=false)
which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
-- 'Network configuration inside a guest OS, by using I(networks.type), I(networks.ip), I(networks.gateway) etc. parameters, is supported on
+- 'Network configuration inside a guest OS, by using O(networks[].type), O(networks[].ip), O(networks[].gateway) etc. parameters, is supported on
XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest
- agent only support None and Static types of network configuration, where None means DHCP configured interface, I(networks.type) and I(networks.type6)
- values C(none) and C(dhcp) have same effect. More info here:
+ agent only supports None and Static types of network configuration, where None means a DHCP configured interface, O(networks[].type) and O(networks[].type6)
+ values V(none) and V(dhcp) have same effect. More info here:
U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore
- C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or trough
+ C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through
WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
to implement a boot time scripts or custom agent that will read the parameters from xenstore and configure network with given parameters.
Take note that for xenstore data to become available inside a guest, a VM restart is needed hence module will require VM restart if any
- parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration trough xenstore is most
- useful for bootstraping newly deployed VMs, much less for reconfiguring existing ones. More info here:
+ parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
+ useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
U(https://support.citrix.com/article/CTX226713)'
requirements:
-- python >= 2.6
- XenAPI
attributes:
check_mode:
@@ -55,10 +54,10 @@ options:
state:
description:
- Specify the state VM should be in.
- - If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
- - If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
- - If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
- - If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
+ - If O(state) is set to V(present) and VM exists, ensure the VM configuration conforms to given parameters.
+ - If O(state) is set to V(present) and VM does not exist, then VM is deployed with given parameters.
+ - If O(state) is set to V(absent) and VM exists, then VM is removed with its associated components.
+ - If O(state) is set to V(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
type: str
default: present
choices: [ present, absent, poweredon ]
@@ -66,7 +65,7 @@ options:
description:
- Name of the VM to work with.
- VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found.
- - In case of multiple VMs with same name, use I(uuid) to uniquely specify VM to manage.
+ - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage.
- This parameter is case sensitive.
type: str
aliases: [ name_label ]
@@ -84,7 +83,7 @@ options:
description:
- Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
- Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found.
- - In case of multiple templates/VMs/snapshots with same name, use I(template_uuid) to uniquely specify source template.
+ - In case of multiple templates/VMs/snapshots with same name, use O(template_uuid) to uniquely specify source template.
- If VM already exists, this setting will be ignored.
- This parameter is case sensitive.
type: str
@@ -117,7 +116,7 @@ options:
type: int
num_cpu_cores_per_socket:
description:
- - Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket).
+ - Number of Cores Per Socket. O(hardware.num_cpus) has to be a multiple of O(hardware.num_cpu_cores_per_socket).
type: int
memory_mb:
description:
@@ -128,7 +127,7 @@ options:
- A list of disks to add to VM.
- All parameters are case sensitive.
- Removing or detaching existing disks of VM is not supported.
- - New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified.
+ - New disks are required to have either a O(disks[].size) or one of O(ignore:disks[].size_[tb,gb,mb,kb,b]) parameters specified.
- VM needs to be shut down to reconfigure disk size.
type: list
elements: dict
@@ -136,7 +135,7 @@ options:
suboptions:
size:
description:
- - 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.'
+ - 'Disk size with unit. Unit must be: V(b), V(kb), V(mb), V(gb), V(tb). VM needs to be shut down to reconfigure this parameter.'
- If no unit is specified, size is assumed to be in bytes.
type: str
size_b:
@@ -184,13 +183,13 @@ options:
suboptions:
type:
description:
- - The type of CD-ROM. With C(none) the CD-ROM device will be present but empty.
+ - The type of CD-ROM. With V(none) the CD-ROM device will be present but empty.
type: str
choices: [ none, iso ]
iso_name:
description:
- - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).'
- - Required if I(type) is set to C(iso).
+ - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies O(cdrom.type=iso)).'
+ - Required if O(cdrom.type) is set to V(iso).
type: str
networks:
description:
@@ -212,17 +211,17 @@ options:
type: str
type:
description:
- - Type of IPv4 assignment. Value C(none) means whatever is default for OS.
+ - Type of IPv4 assignment. Value V(none) means whatever is default for OS.
- On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).
type: str
choices: [ none, dhcp, static ]
ip:
description:
- - 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(<IPv4 address>/<prefix>) instead of using C(netmask).'
+ - 'Static IPv4 address (implies O(networks[].type=static)). Can include prefix in format C(<IPv4 address>/<prefix>) instead of using C(netmask).'
type: str
netmask:
description:
- - Static IPv4 netmask required for I(ip) if prefix is not specified.
+ - Static IPv4 netmask required for O(networks[].ip) if prefix is not specified.
type: str
gateway:
description:
@@ -230,12 +229,12 @@ options:
type: str
type6:
description:
- - Type of IPv6 assignment. Value C(none) means whatever is default for OS.
+ - Type of IPv6 assignment. Value V(none) means whatever is default for OS.
type: str
choices: [ none, dhcp, static ]
ip6:
description:
- - 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(<IPv6 address>/<prefix>).'
+ - 'Static IPv6 address (implies O(networks[].type6=static)) with prefix in format C(<IPv6 address>/<prefix>).'
type: str
gateway6:
description:
@@ -249,8 +248,8 @@ options:
custom_params:
description:
- Define a list of custom VM params to set on VM.
- - Useful for advanced users familiar with managing VM params trough xe CLI.
- - A custom value object takes two fields I(key) and I(value) (see example below).
+ - Useful for advanced users familiar with managing VM params through xe CLI.
+ - A custom value object takes two fields O(custom_params[].key) and O(custom_params[].value) (see example below).
type: list
elements: dict
suboptions:
@@ -266,13 +265,13 @@ options:
required: true
wait_for_ip_address:
description:
- - Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored.
+ - Wait until XenServer detects an IP address for the VM. If O(state) is set to V(absent), this parameter is ignored.
- This requires XenServer Tools to be preinstalled on the VM to work properly.
type: bool
default: false
state_change_timeout:
description:
- - 'By default, module will wait indefinitely for VM to accquire an IP address if I(wait_for_ip_address): C(true).'
+ - 'By default, module will wait indefinitely for VM to acquire an IP address if O(wait_for_ip_address=true).'
- If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
- In case of timeout, module will generate an error message.
type: int
@@ -301,7 +300,6 @@ EXAMPLES = r'''
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
- validate_certs: false
folder: /testvms
name: testvm_2
state: poweredon
@@ -328,7 +326,6 @@ EXAMPLES = r'''
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
- validate_certs: false
folder: /testvms
name: testvm_6
is_template: true
@@ -990,7 +987,7 @@ class XenServerVM(XenServerObject):
vif_device = vm_vif_params['device']
# A user could have manually changed network
- # or mac e.g. trough XenCenter and then also
+ # or mac e.g. through XenCenter and then also
# make those changes in playbook manually.
# In that case, module will not detect any
# changes and info in xenstore_data will
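For illustration, a minimal task using the network options discussed in the notes above; the connection variables, VM name, template, network name, and addresses are placeholders:

- name: Deploy a VM with a statically configured first NIC (illustrative values)
  community.general.xenserver_guest:
    hostname: "{{ xenserver_hostname }}"
    username: "{{ xenserver_username }}"
    password: "{{ xenserver_password }}"
    name: testvm_static
    template: CentOS 8 template                      # placeholder template name
    networks:
      - name: Pool-wide network associated with eth0 # placeholder network name
        type: static
        ip: 192.168.1.10/24
        gateway: 192.168.1.1
    state: poweredon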
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py b/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
index dd28cf7d0..68050f950 100644
--- a/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
+++ b/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
@@ -27,16 +27,14 @@ notes:
accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
-- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
requirements:
-- python >= 2.6
- XenAPI
options:
name:
description:
- Name of the VM to gather facts from.
- VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found.
- - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage.
+ - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage.
- This parameter is case sensitive.
type: str
aliases: [ name_label ]
@@ -151,12 +149,6 @@ instance:
}
'''
-HAS_XENAPI = False
-try:
- import XenAPI # noqa: F401, pylint: disable=unused-import
- HAS_XENAPI = True
-except ImportError:
- pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, get_object_ref,
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py b/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
index ba88bbf1d..c4e4f5976 100644
--- a/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
+++ b/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
@@ -28,7 +28,6 @@ notes:
- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
requirements:
-- python >= 2.6
- XenAPI
attributes:
check_mode:
@@ -39,8 +38,8 @@ options:
state:
description:
- Specify the state VM should be in.
- - If C(state) is set to value other than C(present), then VM is transitioned into required state and facts are returned.
- - If C(state) is set to C(present), then VM is just checked for existence and facts are returned.
+ - If O(state) is set to a value other than V(present), then VM is transitioned into required state and facts are returned.
+ - If O(state) is set to V(present), then VM is just checked for existence and facts are returned.
type: str
default: present
choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
@@ -48,7 +47,7 @@ options:
description:
- Name of the VM to manage.
- VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found.
- - In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage.
+ - In case of multiple VMs with same name, use O(uuid) to uniquely specify VM to manage.
- This parameter is case sensitive.
type: str
aliases: [ name_label ]
@@ -65,7 +64,7 @@ options:
default: false
state_change_timeout:
description:
- - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: true).'
+ - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if O(wait_for_ip_address=true).'
- If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
- In case of timeout, module will generate an error message.
type: int
@@ -177,12 +176,6 @@ instance:
}
'''
-HAS_XENAPI = False
-try:
- import XenAPI # noqa: F401, pylint: disable=unused-import
- HAS_XENAPI = True
-except ImportError:
- pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, get_object_ref,
diff --git a/ansible_collections/community/general/plugins/modules/xfconf.py b/ansible_collections/community/general/plugins/modules/xfconf.py
index 567117d40..8ed44c675 100644
--- a/ansible_collections/community/general/plugins/modules/xfconf.py
+++ b/ansible_collections/community/general/plugins/modules/xfconf.py
@@ -58,15 +58,15 @@ options:
value_type:
description:
- The type of value being set.
- - When providing more than one I(value_type), the length of the list must
- be equal to the length of I(value).
- - If only one I(value_type) is provided, but I(value) contains more than
- on element, that I(value_type) will be applied to all elements of I(value).
- - If the I(property) being set is an array and it can possibly have ony one
- element in the array, then I(force_array=true) must be used to ensure
+ - When providing more than one O(value_type), the length of the list must
+ be equal to the length of O(value).
+ - If only one O(value_type) is provided, but O(value) contains more than
+ one element, that O(value_type) will be applied to all elements of O(value).
+ - If the O(property) being set is an array and it can possibly have only one
+ element in the array, then O(force_array=true) must be used to ensure
that C(xfconf-query) will interpret the value as an array rather than a
scalar.
- - Support for C(uchar), C(char), C(uint64), and C(int64) has been added in community.general 4.8.0.
+ - Support for V(uchar), V(char), V(uint64), and V(int64) has been added in community.general 4.8.0.
type: list
elements: str
choices: [ string, int, double, bool, uint, uchar, char, uint64, int64, float ]
@@ -74,7 +74,7 @@ options:
type: str
description:
- The action to take upon the property/value.
- - The state C(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead.
+ - The state V(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead.
choices: [ present, absent ]
default: "present"
force_array:
@@ -84,13 +84,6 @@ options:
default: false
aliases: ['array']
version_added: 1.0.0
- disable_facts:
- description:
- - The value C(false) is no longer allowed since community.general 4.0.0.
- - This option is deprecated, and will be removed in community.general 8.0.0.
- type: bool
- default: true
- version_added: 2.1.0
'''
EXAMPLES = """
@@ -130,9 +123,8 @@ RETURN = '''
sample: "/Xft/DPI"
value_type:
description:
- - The type of the value that was changed (C(none) for C(reset)
- state). Either a single string value or a list of strings for array
- types.
+ - The type of the value that was changed (V(none) for O(state=reset)).
+ Either a single string value or a list of strings for array types.
- This is a string or a list of strings.
returned: success
type: any
@@ -181,7 +173,6 @@ class XFConfProperty(StateModuleHelper):
change_params = ('value', )
diff_params = ('value', )
output_params = ('property', 'channel', 'value')
- facts_params = ('property', 'channel', 'value')
module = dict(
argument_spec=dict(
state=dict(type='str', choices=("present", "absent"), default="present"),
@@ -191,11 +182,6 @@ class XFConfProperty(StateModuleHelper):
choices=('string', 'int', 'double', 'bool', 'uint', 'uchar', 'char', 'uint64', 'int64', 'float')),
value=dict(type='list', elements='raw'),
force_array=dict(type='bool', default=False, aliases=['array']),
- disable_facts=dict(
- type='bool', default=True,
- removed_in_version='8.0.0',
- removed_from_collection='community.general'
- ),
),
required_if=[('state', 'present', ['value', 'value_type'])],
required_together=[('value', 'value_type')],
@@ -204,20 +190,14 @@ class XFConfProperty(StateModuleHelper):
default_state = 'present'
- def update_xfconf_output(self, **kwargs):
- self.update_vars(meta={"output": True, "fact": True}, **kwargs)
-
def __init_module__(self):
self.runner = xfconf_runner(self.module)
self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.vars.property,
self.vars.channel)
- self.vars.set('previous_value', self._get(), fact=True)
- self.vars.set('type', self.vars.value_type, fact=True)
+ self.vars.set('previous_value', self._get())
+ self.vars.set('type', self.vars.value_type)
self.vars.meta('value').set(initial_value=self.vars.previous_value)
- if self.vars.disable_facts is False:
- self.do_raise('Returning results as facts has been removed. Stop using disable_facts=false.')
-
def process_command_output(self, rc, out, err):
if err.rstrip() == self.does_not:
return None
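For illustration, a task combining value_type and force_array as documented above; the channel, property, and value are placeholders:

- name: Set a single-element array property so xfconf-query treats it as an array
  community.general.xfconf:
    channel: xfwm4                       # placeholder channel
    property: /general/workspace_names   # placeholder property
    value_type: string
    value: ["Main"]
    force_array: true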
diff --git a/ansible_collections/community/general/plugins/modules/xfconf_info.py b/ansible_collections/community/general/plugins/modules/xfconf_info.py
index 0a99201ef..844ef3c11 100644
--- a/ansible_collections/community/general/plugins/modules/xfconf_info.py
+++ b/ansible_collections/community/general/plugins/modules/xfconf_info.py
@@ -36,8 +36,8 @@ options:
- >
A Xfce preference key is an element in the Xfconf repository
that corresponds to an application preference.
- - If provided, then I(channel) is required.
- - If not provided and a I(channel) is provided, then the module will list all available properties in that I(channel).
+ - If provided, then O(channel) is required.
+ - If not provided and a O(channel) is provided, then the module will list all available properties in that O(channel).
type: str
notes:
- See man xfconf-query(1) for more details.
@@ -82,7 +82,7 @@ RETURN = '''
properties:
description:
- List of available properties for a specific channel.
- - Returned by passing only the I(channel) parameter to the module.
+ - Returned by passing only the O(channel) parameter to the module.
returned: success
type: list
elements: str
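For illustration, passing only the channel to list its properties, as described above; the channel name is a placeholder:

- name: List all available properties of a channel
  community.general.xfconf_info:
    channel: xfwm4        # placeholder channel
  register: xfwm4_props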
diff --git a/ansible_collections/community/general/plugins/modules/xml.py b/ansible_collections/community/general/plugins/modules/xml.py
index 5b9bba355..a3c12b8ee 100644
--- a/ansible_collections/community/general/plugins/modules/xml.py
+++ b/ansible_collections/community/general/plugins/modules/xml.py
@@ -29,18 +29,18 @@ options:
description:
- Path to the file to operate on.
- This file must exist ahead of time.
- - This parameter is required, unless I(xmlstring) is given.
+ - This parameter is required, unless O(xmlstring) is given.
type: path
aliases: [ dest, file ]
xmlstring:
description:
- A string containing XML on which to operate.
- - This parameter is required, unless I(path) is given.
+ - This parameter is required, unless O(path) is given.
type: str
xpath:
description:
- A valid XPath expression describing the item(s) you want to manipulate.
- - Operates on the document root, C(/), by default.
+ - Operates on the document root, V(/), by default.
type: str
namespaces:
description:
@@ -57,43 +57,43 @@ options:
aliases: [ ensure ]
attribute:
description:
- - The attribute to select when using parameter I(value).
- - This is a string, not prepended with C(@).
+ - The attribute to select when using parameter O(value).
+ - This is a string, not prepended with V(@).
type: raw
value:
description:
- Desired state of the selected attribute.
- - Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)).
+ - Either a string, or to unset a value, the Python V(None) keyword (YAML Equivalent, V(null)).
- Elements default to no value (but present).
- Attributes default to an empty string.
type: raw
add_children:
description:
- - Add additional child-element(s) to a selected element for a given I(xpath).
+ - Add additional child-element(s) to a selected element for a given O(xpath).
- Child elements must be given in a list and each item may be either a string
- (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+ (for example C(children=ansible) to add an empty C(<ansible/>) child element),
or a hash where the key is an element name and the value is the element value.
- - This parameter requires I(xpath) to be set.
+ - This parameter requires O(xpath) to be set.
type: list
elements: raw
set_children:
description:
- - Set the child-element(s) of a selected element for a given I(xpath).
+ - Set the child-element(s) of a selected element for a given O(xpath).
- Removes any existing children.
- - Child elements must be specified as in I(add_children).
- - This parameter requires I(xpath) to be set.
+ - Child elements must be specified as in O(add_children).
+ - This parameter requires O(xpath) to be set.
type: list
elements: raw
count:
description:
- - Search for a given I(xpath) and provide the count of any matches.
- - This parameter requires I(xpath) to be set.
+ - Search for a given O(xpath) and provide the count of any matches.
+ - This parameter requires O(xpath) to be set.
type: bool
default: false
print_match:
description:
- - Search for a given I(xpath) and print out any matches.
- - This parameter requires I(xpath) to be set.
+ - Search for a given O(xpath) and print out any matches.
+ - This parameter requires O(xpath) to be set.
type: bool
default: false
pretty_print:
@@ -103,13 +103,13 @@ options:
default: false
content:
description:
- - Search for a given I(xpath) and get content.
- - This parameter requires I(xpath) to be set.
+ - Search for a given O(xpath) and get content.
+ - This parameter requires O(xpath) to be set.
type: str
choices: [ attribute, text ]
input_type:
description:
- - Type of input for I(add_children) and I(set_children).
+ - Type of input for O(add_children) and O(set_children).
type: str
choices: [ xml, yaml ]
default: yaml
@@ -127,20 +127,20 @@ options:
default: false
insertbefore:
description:
- - Add additional child-element(s) before the first selected element for a given I(xpath).
+ - Add additional child-element(s) before the first selected element for a given O(xpath).
- Child elements must be given in a list and each item may be either a string
- (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+ (for example C(children=ansible) to add an empty C(<ansible/>) child element),
or a hash where the key is an element name and the value is the element value.
- - This parameter requires I(xpath) to be set.
+ - This parameter requires O(xpath) to be set.
type: bool
default: false
insertafter:
description:
- - Add additional child-element(s) after the last selected element for a given I(xpath).
+ - Add additional child-element(s) after the last selected element for a given O(xpath).
- Child elements must be given in a list and each item may be either a string
- (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+ (for example C(children=ansible) to add an empty C(<ansible/>) child element),
or a hash where the key is an element name and the value is the element value.
- - This parameter requires I(xpath) to be set.
+ - This parameter requires O(xpath) to be set.
type: bool
default: false
requirements:
@@ -149,7 +149,7 @@ notes:
- Use the C(--check) and C(--diff) options when testing your expressions.
- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
-- Beware that in case your XML elements are namespaced, you need to use the I(namespaces) parameter, see the examples.
+- Beware that in case your XML elements are namespaced, you need to use the O(namespaces) parameter, see the examples.
- Namespaces prefix should be used for all children of an element where namespace is defined, unless another namespace is defined for them.
seealso:
- name: Xml module development community wiki
@@ -338,7 +338,7 @@ actions:
backup_file:
description: The name of the backup file that was created
type: str
- returned: when I(backup=true)
+ returned: when O(backup=true)
sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
count:
description: The count of xpath matches.
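For illustration, a task adding a child element under a given xpath as documented above; the file path, xpath, and element name/value are placeholders:

- name: Add a child element to a selected node
  community.general.xml:
    path: /etc/foo/config.xml     # placeholder file
    xpath: /config/services       # placeholder xpath
    add_children:
      - service: httpd            # placeholder element and value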
diff --git a/ansible_collections/community/general/plugins/modules/yum_versionlock.py b/ansible_collections/community/general/plugins/modules/yum_versionlock.py
index e5d32dc77..0cbf9be39 100644
--- a/ansible_collections/community/general/plugins/modules/yum_versionlock.py
+++ b/ansible_collections/community/general/plugins/modules/yum_versionlock.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Florian Paul Azim Hoberg <florian.hoberg@credativ.de>
+# Copyright (c) 2018, Florian Paul Azim Hoberg (@gyptazy) <gyptazy@gyptazy.ch>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -25,14 +25,15 @@ attributes:
options:
name:
description:
- - Package name or a list of package names with optional wildcards.
+ - Package name or a list of package names with optional version or wildcards.
+ - Specifying versions is supported since community.general 7.2.0.
type: list
required: true
elements: str
state:
description:
- - If state is C(present), package(s) will be added to yum versionlock list.
- - If state is C(absent), package(s) will be removed from yum versionlock list.
+ - If state is V(present), package(s) will be added to yum versionlock list.
+ - If state is V(absent), package(s) will be removed from yum versionlock list.
choices: [ 'absent', 'present' ]
type: str
default: present
@@ -50,7 +51,14 @@ EXAMPLES = r'''
- name: Prevent Apache / httpd from being updated
community.general.yum_versionlock:
state: present
- name: httpd
+ name:
+ - httpd
+
+- name: Prevent Apache / httpd version 2.4.57-2 from being updated
+ community.general.yum_versionlock:
+ state: present
+ name:
+ - httpd-0:2.4.57-2.el9
- name: Prevent multiple packages from being updated
community.general.yum_versionlock:
@@ -111,22 +119,30 @@ class YumVersionLock:
def ensure_state(self, packages, command):
""" Ensure packages state """
rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command] + packages)
+ # If no package can be found, this is reported on stdout with rc 0
+ if 'No package found for' in out:
+ self.module.fail_json(msg=out)
if rc == 0:
return True
self.module.fail_json(msg="Error: " + to_native(err) + to_native(out))
def match(entry, name):
+ match = False
m = NEVRA_RE_YUM.match(entry)
if not m:
m = NEVRA_RE_DNF.match(entry)
if not m:
return False
- return fnmatch(m.group("name"), name)
+ if fnmatch(m.group("name"), name):
+ match = True
+ if entry.rstrip('.*') == name:
+ match = True
+ return match
def main():
- """ start main program to add/remove a package to yum versionlock"""
+ """ start main program to add/delete a package to yum versionlock """
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent']),
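For symmetry with the examples added above, a minimal task removing a lock with state absent; the package name is a placeholder:

- name: Allow httpd to be updated again by removing its versionlock
  community.general.yum_versionlock:
    state: absent
    name:
      - httpd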
diff --git a/ansible_collections/community/general/plugins/modules/zfs.py b/ansible_collections/community/general/plugins/modules/zfs.py
index 4cd79c36e..f23cc4580 100644
--- a/ansible_collections/community/general/plugins/modules/zfs.py
+++ b/ansible_collections/community/general/plugins/modules/zfs.py
@@ -24,19 +24,19 @@ attributes:
- In certain situations it may report a task as changed that will not be reported
as changed when C(check_mode) is disabled.
- For example, this might occur when the zpool C(altroot) option is set or when
- a size is written using human-readable notation, such as C(1M) or C(1024K),
- instead of as an unqualified byte count, such as C(1048576).
+ a size is written using human-readable notation, such as V(1M) or V(1024K),
+ instead of as an unqualified byte count, such as V(1048576).
diff_mode:
support: full
options:
name:
description:
- - File system, snapshot or volume name e.g. C(rpool/myfs).
+ - File system, snapshot or volume name, for example V(rpool/myfs).
required: true
type: str
state:
description:
- - Whether to create (C(present)), or remove (C(absent)) a
+ - Whether to create (V(present)), or remove (V(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: [ absent, present ]
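For illustration, a minimal task using the name and state options documented above; the dataset name is a placeholder:

- name: Create a ZFS file system
  community.general.zfs:
    name: rpool/myfs      # placeholder dataset
    state: present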
diff --git a/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py b/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
index 0536f1a28..24f742220 100644
--- a/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
+++ b/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
@@ -15,7 +15,7 @@ short_description: Manage ZFS delegated administration (user admin privileges)
description:
- Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
operations normally restricted to the superuser.
- - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
+ - See the C(zfs allow) section of V(zfs(1M\)) for detailed explanations of options.
- This module attempts to adhere to the behavior of the command line tool as much as possible.
requirements:
- "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all
@@ -30,14 +30,14 @@ attributes:
options:
name:
description:
- - File system or volume name e.g. C(rpool/myfs).
+ - File system or volume name, for example V(rpool/myfs).
required: true
type: str
state:
description:
- - Whether to allow (C(present)), or unallow (C(absent)) a permission.
- - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) are required.
- - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
+ - Whether to allow (V(present)), or unallow (V(absent)) a permission.
+ - When set to V(present), at least one "entity" param of O(users), O(groups), or O(everyone) are required.
+ - When set to V(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
choices: [ absent, present ]
default: present
type: str
@@ -58,22 +58,22 @@ options:
default: false
permissions:
description:
- - The list of permission(s) to delegate (required if C(state) is C(present)).
+ - The list of permission(s) to delegate (required if O(state=present)).
- Supported permissions depend on the ZFS version in use. See for example
U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS.
type: list
elements: str
local:
description:
- - Apply permissions to C(name) locally (C(zfs allow -l)).
+ - Apply permissions to O(name) locally (C(zfs allow -l)).
type: bool
descendents:
description:
- - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+ - Apply permissions to O(name)'s descendents (C(zfs allow -d)).
type: bool
recursive:
description:
- - Unallow permissions recursively (ignored when C(state) is C(present)).
+ - Unallow permissions recursively (ignored when O(state=present)).
type: bool
default: false
author:
diff --git a/ansible_collections/community/general/plugins/modules/znode.py b/ansible_collections/community/general/plugins/modules/znode.py
index f5aa54ef8..e8f7f1dc7 100644
--- a/ansible_collections/community/general/plugins/modules/znode.py
+++ b/ansible_collections/community/general/plugins/modules/znode.py
@@ -66,9 +66,9 @@ options:
version_added: 5.8.0
auth_credential:
description:
- - The authentication credential value. Depends on I(auth_scheme).
- - The format for I(auth_scheme=digest) is C(user:password),
- and the format for I(auth_scheme=sasl) is C(user:password).
+ - The authentication credential value. Depends on O(auth_scheme).
+ - The format for O(auth_scheme=digest) is C(user:password),
+ and the format for O(auth_scheme=sasl) is C(user:password).
type: str
required: false
version_added: 5.8.0
@@ -81,7 +81,6 @@ options:
version_added: '6.5.0'
requirements:
- kazoo >= 2.1
- - python >= 2.6
author: "Trey Perry (@treyperry)"
'''
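For illustration, a task using the authentication options documented above; the ZooKeeper host, znode path, value, and credential are placeholders:

- name: Create a znode using digest authentication
  community.general.znode:
    hosts: localhost:2181          # placeholder ensemble
    name: /mypath                  # placeholder znode
    value: myvalue
    auth_scheme: digest
    auth_credential: user1:s3cr3t  # placeholder user:password
    state: present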
diff --git a/ansible_collections/community/general/plugins/modules/zypper.py b/ansible_collections/community/general/plugins/modules/zypper.py
index b47131d3d..fae859fe7 100644
--- a/ansible_collections/community/general/plugins/modules/zypper.py
+++ b/ansible_collections/community/general/plugins/modules/zypper.py
@@ -42,22 +42,22 @@ attributes:
options:
name:
description:
- - Package name C(name) or package specifier or a list of either.
- - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ - Package name V(name) or package specifier or a list of either.
+ - Can include a version like V(name=1.0), V(name>3.4) or V(name<=2.7). If a version is given, V(oldpackage) is implied and zypper is allowed to
update the package within the version range given.
- You can also pass a url or a local path to a rpm file.
- - When using I(state=latest), this can be '*', which updates all installed packages.
+ - When using O(state=latest), this can be '*', which updates all installed packages.
required: true
aliases: [ 'pkg' ]
type: list
elements: str
state:
description:
- - C(present) will make sure the package is installed.
- C(latest) will make sure the latest version of the package is installed.
- C(absent) will make sure the specified package is not installed.
- C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
- - When using C(dist-upgrade), I(name) should be C('*').
+ - V(present) will make sure the package is installed.
+ V(latest) will make sure the latest version of the package is installed.
+ V(absent) will make sure the specified package is not installed.
+ V(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using V(dist-upgrade), O(name) should be V('*').
required: false
choices: [ present, latest, absent, dist-upgrade, installed, removed ]
default: "present"
@@ -78,14 +78,14 @@ options:
disable_gpg_check:
description:
- Whether to disable to GPG signature checking of the package
- signature being installed. Has an effect only if state is
- I(present) or I(latest).
+ signature being installed. Has an effect only if O(state) is
+ V(present) or V(latest).
required: false
default: false
type: bool
disable_recommends:
description:
- - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(true)) modifies zypper's default behavior; C(false) does
+ - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (V(true)) modifies zypper's default behavior; V(false) does
install recommended packages.
required: false
default: true
@@ -146,7 +146,7 @@ options:
version_added: '4.6.0'
notes:
- When used with a C(loop:) each package will be processed individually,
- it is much more efficient to pass the list directly to the I(name) option.
+ it is much more efficient to pass the list directly to the O(name) option.
# informational: requirements for nodes
requirements:
- "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
@@ -205,7 +205,7 @@ EXAMPLES = '''
allow_vendor_change: true
extra_args: '--allow-arch-change'
-- name: Perform a installaion of nmap with the install option replacefiles
+- name: Perform an installation of nmap with the install option replacefiles
community.general.zypper:
name: 'nmap'
state: latest
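For illustration, the dist-upgrade usage described above, where name should be '*':

- name: Upgrade all packages from all enabled repositories
  community.general.zypper:
    name: '*'
    state: dist-upgrade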
diff --git a/ansible_collections/community/general/plugins/modules/zypper_repository.py b/ansible_collections/community/general/plugins/modules/zypper_repository.py
index cccd9c579..5a0356cc3 100644
--- a/ansible_collections/community/general/plugins/modules/zypper_repository.py
+++ b/ansible_collections/community/general/plugins/modules/zypper_repository.py
@@ -47,8 +47,7 @@ options:
disable_gpg_check:
description:
- Whether to disable GPG signature checking of
- all packages. Has an effect only if state is
- I(present).
+ all packages. Has an effect only if O(state=present).
- Needs zypper version >= 1.6.2.
type: bool
default: false
@@ -73,7 +72,7 @@ options:
auto_import_keys:
description:
- Automatically import the gpg signing key of the new or changed repository.
- - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Has an effect only if O(state=present). Has no effect on existing (unchanged) repositories or in combination with O(state=absent).
- Implies runrefresh.
- Only works with C(.repo) files if `name` is given explicitly.
type: bool
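For illustration, a task combining auto_import_keys with state present as documented above; the repository name and URL are placeholders:

- name: Add a repository and automatically import its GPG signing key
  community.general.zypper_repository:
    name: example_repo                          # placeholder name
    repo: 'https://download.example.com/repo/'  # placeholder URL
    state: present
    auto_import_keys: true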
diff --git a/ansible_collections/community/general/plugins/test/fqdn_valid.py b/ansible_collections/community/general/plugins/test/fqdn_valid.py
new file mode 100644
index 000000000..1ec774207
--- /dev/null
+++ b/ansible_collections/community/general/plugins/test/fqdn_valid.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2023, Vladimir Botka <vbotka@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import raise_from
+
+try:
+ from fqdn import FQDN
+except ImportError as imp_exc:
+ ANOTHER_LIBRARY_IMPORT_ERROR = imp_exc
+else:
+ ANOTHER_LIBRARY_IMPORT_ERROR = None
+
+
+DOCUMENTATION = '''
+ name: fqdn_valid
+ short_description: Validates fully-qualified domain names against RFC 1123
+ version_added: 8.1.0
+ author: Vladimir Botka (@vbotka)
+ requirements:
+ - fqdn>=1.5.1 (PyPI)
+ description:
+ - This test validates Fully Qualified Domain Names (FQDNs)
+ conforming to the Internet Engineering Task Force specification
+ RFC 1123 and RFC 952.
+ - The design intent is to validate that a string would be
+ traditionally acceptable as a public Internet hostname to
+ RFC-conforming software, which is a strict subset of the logic
+ in modern web browsers like Mozilla Firefox and Chromium that
+ determines whether to make a DNS lookup.
+ - Certificate Authorities like Let's Encrypt run a narrower set of
+ string validation logic to determine validity for issuance. This
+ test is not intended to achieve functional parity with CA
+ issuance.
+ - Single label names are allowed by default (O(min_labels=1)).
+ options:
+ _input:
+ description: Name of the host.
+ type: str
+ required: true
+ min_labels:
+ description: Required minimum number of labels, separated by periods.
+ default: 1
+ type: int
+ required: false
+ allow_underscores:
+ description: Allow underscore characters.
+ default: false
+ type: bool
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Make sure that hostname is valid
+ ansible.builtin.assert:
+ that: hostname is community.general.fqdn_valid
+
+- name: Make sure that hostname is at least 3 labels long (a.b.c)
+ ansible.builtin.assert:
+ that: hostname is community.general.fqdn_valid(min_labels=3)
+
+- name: Make sure that hostname is at least 2 labels long (a.b). Allow '_'
+ ansible.builtin.assert:
+ that: hostname is community.general.fqdn_valid(min_labels=2, allow_underscores=True)
+'''
+
+RETURN = '''
+ _value:
+ description: Whether the name is valid.
+ type: bool
+'''
+
+
+def fqdn_valid(name, min_labels=1, allow_underscores=False):
+ """
+ Example:
+ - 'srv.example.com' is community.general.fqdn_valid
+ - 'foo_bar.example.com' is community.general.fqdn_valid(allow_underscores=True)
+ """
+
+ if ANOTHER_LIBRARY_IMPORT_ERROR:
+ raise_from(
+ AnsibleError('Python package fqdn must be installed to use this test.'),
+ ANOTHER_LIBRARY_IMPORT_ERROR
+ )
+
+ fobj = FQDN(name, min_labels=min_labels, allow_underscores=allow_underscores)
+ return fobj.is_valid
+
+
+class TestModule(object):
+ ''' Ansible test for hostname validity.
+ https://pypi.org/project/fqdn/
+ '''
+
+ def tests(self):
+ return {
+ 'fqdn_valid': fqdn_valid,
+ }